From e8167a2b8ceddb94e2cd550690c3c7656f984880 Mon Sep 17 00:00:00 2001 From: Joel Hendrix Date: Fri, 28 Apr 2023 09:55:43 -0700 Subject: [PATCH 01/50] Enable gocritic during linting (#20715) Enabled gocritic's evalOrder to catch dependencies on undefined behavior on return statements. Updated to latest version of golangci-lint. Fixed issue in azblob flagged by latest linter. --- eng/.golangci.yml | 10 ++++++++++ eng/pipelines/templates/variables/globals.yml | 2 +- sdk/storage/azblob/blockblob/mmf_windows.go | 4 +++- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/eng/.golangci.yml b/eng/.golangci.yml index e4e98907a911..c02d708f5a68 100644 --- a/eng/.golangci.yml +++ b/eng/.golangci.yml @@ -3,3 +3,13 @@ run: # default is true. Enables skipping of directories: # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ skip-dirs-use-default: true + timeout: 10m + +linters: + enable: + - gocritic + +linters-settings: + gocritic: + enabled-checks: + - evalOrder diff --git a/eng/pipelines/templates/variables/globals.yml b/eng/pipelines/templates/variables/globals.yml index 071313330e97..98f4404689df 100644 --- a/eng/pipelines/templates/variables/globals.yml +++ b/eng/pipelines/templates/variables/globals.yml @@ -1,5 +1,5 @@ variables: - GoLintCLIVersion: 'v1.51.1' + GoLintCLIVersion: 'v1.52.2' Package.EnableSBOMSigning: true # Enable go native component governance detection # https://docs.opensource.microsoft.com/tools/cg/index.html diff --git a/sdk/storage/azblob/blockblob/mmf_windows.go b/sdk/storage/azblob/blockblob/mmf_windows.go index 2acef3a72f17..3f966d65b887 100644 --- a/sdk/storage/azblob/blockblob/mmf_windows.go +++ b/sdk/storage/azblob/blockblob/mmf_windows.go @@ -26,7 +26,9 @@ func newMMB(size int64) (mmb, error) { if err != nil { return nil, os.NewSyscallError("CreateFileMapping", err) } - defer syscall.CloseHandle(hMMF) + defer func() { + _ = syscall.CloseHandle(hMMF) + }() addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, 
uintptr(size)) if err != nil { From 86627ae445bcc14d233df97347e857fe4eac9fe3 Mon Sep 17 00:00:00 2001 From: Matias Quaranta Date: Fri, 28 Apr 2023 12:24:23 -0700 Subject: [PATCH 02/50] Cosmos DB: Enable merge support (#20716) * Adding header and value * Wiring and tests * format * Fixing value * change log --- sdk/data/azcosmos/CHANGELOG.md | 3 +++ sdk/data/azcosmos/cosmos_client.go | 1 + sdk/data/azcosmos/cosmos_client_test.go | 4 ++++ sdk/data/azcosmos/cosmos_headers.go | 1 + sdk/data/azcosmos/sdk_capabilities.go | 21 +++++++++++++++++++++ 5 files changed, 30 insertions(+) create mode 100644 sdk/data/azcosmos/sdk_capabilities.go diff --git a/sdk/data/azcosmos/CHANGELOG.md b/sdk/data/azcosmos/CHANGELOG.md index 951442859e4d..f4652730f572 100644 --- a/sdk/data/azcosmos/CHANGELOG.md +++ b/sdk/data/azcosmos/CHANGELOG.md @@ -2,6 +2,9 @@ ## 0.3.5 (2023-05-09) +### Features Added +* Added support for accounts with [merge support](https://aka.ms/cosmosdbsdksupportformerge) enabled + ### Bugs Fixed * Fixed unmarshalling error when using projections in value queries diff --git a/sdk/data/azcosmos/cosmos_client.go b/sdk/data/azcosmos/cosmos_client.go index 94c455aed1e4..41807ab416e0 100644 --- a/sdk/data/azcosmos/cosmos_client.go +++ b/sdk/data/azcosmos/cosmos_client.go @@ -395,6 +395,7 @@ func (c *Client) createRequest( req.Raw().Header.Set(headerXmsDate, time.Now().UTC().Format(http.TimeFormat)) req.Raw().Header.Set(headerXmsVersion, "2020-11-05") + req.Raw().Header.Set(cosmosHeaderSDKSupportedCapabilities, supportedCapabilitiesHeaderValue) req.SetOperationValue(operationContext) diff --git a/sdk/data/azcosmos/cosmos_client_test.go b/sdk/data/azcosmos/cosmos_client_test.go index f33e730722a2..d93f4ca93d7c 100644 --- a/sdk/data/azcosmos/cosmos_client_test.go +++ b/sdk/data/azcosmos/cosmos_client_test.go @@ -258,6 +258,10 @@ func TestCreateRequest(t *testing.T) { t.Errorf("Expected %v, but got %v", "2020-11-05", req.Raw().Header.Get(headerXmsVersion)) } + if 
req.Raw().Header.Get(cosmosHeaderSDKSupportedCapabilities) != supportedCapabilitiesHeaderValue { + t.Errorf("Expected %v, but got %v", supportedCapabilitiesHeaderValue, req.Raw().Header.Get(cosmosHeaderSDKSupportedCapabilities)) + } + opValue := pipelineRequestOptions{} if !req.OperationValue(&opValue) { t.Error("Expected to find operation value") diff --git a/sdk/data/azcosmos/cosmos_headers.go b/sdk/data/azcosmos/cosmos_headers.go index d054bd652b93..5c8ea72c48bb 100644 --- a/sdk/data/azcosmos/cosmos_headers.go +++ b/sdk/data/azcosmos/cosmos_headers.go @@ -33,6 +33,7 @@ const ( cosmosHeaderIsBatchRequest string = "x-ms-cosmos-is-batch-request" cosmosHeaderIsBatchAtomic string = "x-ms-cosmos-batch-atomic" cosmosHeaderIsBatchOrdered string = "x-ms-cosmos-batch-ordered" + cosmosHeaderSDKSupportedCapabilities string = "x-ms-cosmos-sdk-supportedcapabilities" headerXmsDate string = "x-ms-date" headerAuthorization string = "Authorization" headerContentType string = "Content-Type" diff --git a/sdk/data/azcosmos/sdk_capabilities.go b/sdk/data/azcosmos/sdk_capabilities.go new file mode 100644 index 000000000000..71157b5fed27 --- /dev/null +++ b/sdk/data/azcosmos/sdk_capabilities.go @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azcosmos + +import "strconv" + +type supportedCapabilities uint64 + +const ( + supportedCapabilitiesNone supportedCapabilities = 0 + supportedCapabilitiesPartitionMerge supportedCapabilities = 1 << 0 +) + +var supportedCapabilitiesHeaderValue = supportedCapabilitiesAsString() + +func supportedCapabilitiesAsString() string { + supported := supportedCapabilitiesNone + supported |= supportedCapabilitiesPartitionMerge + return strconv.FormatUint(uint64(supported), 10) +} From 8ac8c6d4616216f5e99517a832757e6619def9bb Mon Sep 17 00:00:00 2001 From: Richard Park <51494936+richardpark-msft@users.noreply.github.com> Date: Mon, 1 May 2023 11:00:48 -0700 Subject: [PATCH 03/50] [azservicebus, azeventhubs] Stress test and logging improvement (#20710) Logging improvements: * Updating the logging to print more tracing information (per-link) in prep for the bigger release coming up. * Trimming out some of the verbose logging, seeing if I can get it a bit more reasonable. Stress tests: * Add a timestamp to the log name we generate and also default to append, not overwrite. * Use 0.5 cores, 0.5GB as our baseline. Some pods use more and I'll tune them more later. 
--- .../azeventhubs/internal/eh/stress/deploy.ps1 | 2 +- .../eh/stress/templates/stress-test-job.yaml | 6 +- sdk/messaging/azservicebus/client_test.go | 12 +- .../azservicebus/internal/amqpLinks.go | 161 ++++++++++-------- .../azservicebus/internal/amqp_test_utils.go | 9 + .../internal/amqplinks_unit_test.go | 20 +-- sdk/messaging/azservicebus/internal/rpc.go | 3 - .../azservicebus/internal/rpc_test.go | 21 +-- .../azservicebus/internal/stress/.gitignore | 2 +- .../azservicebus/internal/stress/.helmignore | 1 + .../azservicebus/internal/stress/deploy.ps1 | 23 +++ .../internal/stress/scenarios-matrix.yaml | 24 +-- .../stress/templates/stress-test-job.yaml | 4 +- .../internal/stress/tests/finite_peeks.go | 14 +- .../stress/tests/mostly_idle_receiver.go | 2 +- .../internal/test/test_helpers.go | 25 +++ .../azservicebus/internal/utils/logger.go | 37 ++++ .../azservicebus/internal/utils/retrier.go | 10 +- .../internal/utils/retrier_test.go | 10 +- sdk/messaging/azservicebus/receiver.go | 20 +-- .../azservicebus/receiver_simulated_test.go | 6 +- sdk/messaging/azservicebus/receiver_test.go | 18 +- .../azservicebus/receiver_unit_test.go | 8 +- 23 files changed, 272 insertions(+), 166 deletions(-) create mode 100644 sdk/messaging/azservicebus/internal/utils/logger.go diff --git a/sdk/messaging/azeventhubs/internal/eh/stress/deploy.ps1 b/sdk/messaging/azeventhubs/internal/eh/stress/deploy.ps1 index 2abdb8722ec9..79cd01e79509 100644 --- a/sdk/messaging/azeventhubs/internal/eh/stress/deploy.ps1 +++ b/sdk/messaging/azeventhubs/internal/eh/stress/deploy.ps1 @@ -4,7 +4,7 @@ Set-Location $PSScriptRoot function deployUsingLocalAddons() { $azureSDKToolsRoot="" $stressTestAddonsFolder = "$azureSDKToolsRoot/tools/stress-cluster/cluster/kubernetes/stress-test-addons" - $clusterResourceGroup = " set -ex; mkdir -p "$DEBUG_SHARE"; - /app/stress "{{.Stress.testTarget}}" "-rounds" "{{.Stress.rounds}}" "-prefetch" "{{.Stress.prefetch}}" "{{.Stress.verbose}}" "-sleepAfter" 
"{{.Stress.sleepAfter}}" | tee "${DEBUG_SHARE}/{{ .Stress.Scenario }}.log"; + /app/stress "{{.Stress.testTarget}}" "-rounds" "{{.Stress.rounds}}" "-prefetch" "{{.Stress.prefetch}}" "{{.Stress.verbose}}" "-sleepAfter" "{{.Stress.sleepAfter}}" | tee -a "${DEBUG_SHARE}/{{ .Stress.Scenario }}-`date +%s`.log"; # Pulls the image on pod start, always. We tend to push to the same image and tag over and over again # when iterating, so this is a must. imagePullPolicy: Always @@ -33,8 +33,8 @@ spec: # just uses 'limits' for both. resources: limits: - memory: "1.5Gi" - cpu: "1" + memory: "0.5Gi" + cpu: "0.5" {{- include "stress-test-addons.container-env" . | nindent 6 }} {{- end -}} diff --git a/sdk/messaging/azservicebus/client_test.go b/sdk/messaging/azservicebus/client_test.go index fd7e7bf37732..e4eef534558a 100644 --- a/sdk/messaging/azservicebus/client_test.go +++ b/sdk/messaging/azservicebus/client_test.go @@ -442,14 +442,16 @@ func TestClientUnauthorizedCreds(t *testing.T) { }) t.Run("invalid identity creds", func(t *testing.T) { - tenantID := os.Getenv("AZURE_TENANT_ID") - clientID := os.Getenv("AZURE_CLIENT_ID") - endpoint := os.Getenv("SERVICEBUS_ENDPOINT") + identityVars := test.GetIdentityVars(t) - cliCred, err := azidentity.NewClientSecretCredential(tenantID, clientID, "bogus-client-secret", nil) + if identityVars == nil { + return + } + + cliCred, err := azidentity.NewClientSecretCredential(identityVars.TenantID, identityVars.ClientID, "bogus-client-secret", nil) require.NoError(t, err) - client, err := NewClient(endpoint, cliCred, nil) + client, err := NewClient(identityVars.Endpoint, cliCred, nil) require.NoError(t, err) defer test.RequireClose(t, client) diff --git a/sdk/messaging/azservicebus/internal/amqpLinks.go b/sdk/messaging/azservicebus/internal/amqpLinks.go index c187eacaf51b..a7d0590fc669 100644 --- a/sdk/messaging/azservicebus/internal/amqpLinks.go +++ b/sdk/messaging/azservicebus/internal/amqpLinks.go @@ -59,6 +59,13 @@ type AMQPLinks interface { 
// ClosedPermanently is true if AMQPLinks.Close(ctx, true) has been called. ClosedPermanently() bool + + // Writef logs a message, with a prefix that represents the AMQPLinks instance + // for better traceability. + Writef(evt azlog.Event, format string, args ...any) + + // Prefix is the current logging prefix, usable for logging and continuity. + Prefix() string } // AMQPLinksImpl manages the set of AMQP links (and detritus) typically needed to work @@ -107,7 +114,7 @@ type AMQPLinksImpl struct { ns NamespaceForAMQPLinks - name string + utils.Logger } // CreateLinkFunc creates the links, using the given session. Typically you'll only create either an @@ -132,6 +139,7 @@ func NewAMQPLinks(args NewAMQPLinksArgs) AMQPLinks { closedPermanently: false, getRecoveryKindFunc: args.GetRecoveryKindFunc, ns: args.NS, + Logger: utils.NewLogger(), } return l @@ -145,7 +153,7 @@ func (links *AMQPLinksImpl) ManagementPath() string { // recoverLink will recycle all associated links (mgmt, receiver, sender and session) // and recreate them using the link.linkCreator function. 
func (links *AMQPLinksImpl) recoverLink(ctx context.Context, theirLinkRevision LinkID) error { - log.Writef(exported.EventConn, "Recovering link only") + links.Writef(exported.EventConn, "Recovering link only") links.mu.RLock() closedPermanently := links.closedPermanently @@ -190,40 +198,44 @@ func (links *AMQPLinksImpl) RecoverIfNeeded(ctx context.Context, theirID LinkID, return nil } - log.Writef(exported.EventConn, "[%s] Recovering link for error %s", links.name, origErr.Error()) + links.Writef(exported.EventConn, "Recovering link for error %s", origErr.Error()) rk := links.getRecoveryKindFunc(origErr) if rk == RecoveryKindLink { + oldPrefix := links.Prefix() + if err := links.recoverLink(ctx, theirID); err != nil { - azlog.Writef(exported.EventConn, "[%s] Error when recovering link for recovery: %s", links.name, err) + links.Writef(exported.EventConn, "Error when recovering link for recovery: %s", err) return err } - log.Writef(exported.EventConn, "[%s] Recovered links", links.name) + links.Writef(exported.EventConn, "Recovered links (old: %s)", oldPrefix) return nil } else if rk == RecoveryKindConn { + oldPrefix := links.Prefix() + if err := links.recoverConnection(ctx, theirID); err != nil { - log.Writef(exported.EventConn, "[%s] failed to recreate connection: %s", links.name, err.Error()) + links.Writef(exported.EventConn, "failed to recreate connection: %s", err.Error()) return err } - log.Writef(exported.EventConn, "[%s] Recovered connection and links", links.name) + links.Writef(exported.EventConn, "Recovered connection and links (old: %s)", oldPrefix) return nil } - log.Writef(exported.EventConn, "[%s] Recovered, no action needed", links.name) + links.Writef(exported.EventConn, "Recovered, no action needed") return nil } func (links *AMQPLinksImpl) recoverConnection(ctx context.Context, theirID LinkID) error { - log.Writef(exported.EventConn, "Recovering connection (and links)") + links.Writef(exported.EventConn, "Recovering connection (and links)") 
links.mu.Lock() defer links.mu.Unlock() if theirID.Link == links.id.Link { - log.Writef(exported.EventConn, "closing old link: current:%v, old:%v", links.id, theirID) + links.Writef(exported.EventConn, "closing old link: current:%v, old:%v", links.id, theirID) // we're clearing out this link because the connection is about to get recreated. So we can // safely ignore any problems here, we're just trying to make sure the state is reset. @@ -233,7 +245,7 @@ func (links *AMQPLinksImpl) recoverConnection(ctx context.Context, theirID LinkI created, err := links.ns.Recover(ctx, uint64(theirID.Conn)) if err != nil { - log.Writef(exported.EventConn, "Recover connection failure: %s", err) + links.Writef(exported.EventConn, "Recover connection failure: %s", err) return err } @@ -243,7 +255,7 @@ func (links *AMQPLinksImpl) recoverConnection(ctx context.Context, theirID LinkI // (if it wasn't the same then we've already recovered and created a new link, // so no recovery would be needed) if created || theirID.Link == links.id.Link { - log.Writef(exported.EventConn, "recreating link: c: %v, current:%v, old:%v", created, links.id, theirID) + links.Writef(exported.EventConn, "recreating link: c: %v, current:%v, old:%v", created, links.id, theirID) // best effort close, the connection these were built on is gone. 
_ = links.closeWithoutLocking(ctx, false) @@ -303,21 +315,21 @@ func (l *AMQPLinksImpl) Get(ctx context.Context) (*LinksWithID, error) { }, nil } -func (l *AMQPLinksImpl) Retry(ctx context.Context, eventName log.Event, operation string, fn RetryWithLinksFn, o exported.RetryOptions) error { +func (links *AMQPLinksImpl) Retry(ctx context.Context, eventName log.Event, operation string, fn RetryWithLinksFn, o exported.RetryOptions) error { var lastID LinkID didQuickRetry := false isFatalErrorFunc := func(err error) bool { - return l.getRecoveryKindFunc(err) == RecoveryKindFatal + return links.getRecoveryKindFunc(err) == RecoveryKindFatal } - return utils.Retry(ctx, eventName, operation, func(ctx context.Context, args *utils.RetryFnArgs) error { - if err := l.RecoverIfNeeded(ctx, lastID, args.LastErr); err != nil { + return utils.Retry(ctx, eventName, links.Prefix()+"("+operation+")", func(ctx context.Context, args *utils.RetryFnArgs) error { + if err := links.RecoverIfNeeded(ctx, lastID, args.LastErr); err != nil { return err } - linksWithVersion, err := l.Get(ctx) + linksWithVersion, err := links.Get(ctx) if err != nil { return err @@ -345,7 +357,7 @@ func (l *AMQPLinksImpl) Retry(ctx context.Context, eventName log.Event, operatio // Whereas normally you'd do (for non-detach errors): // 0th attempt // (actual retries) - log.Writef(exported.EventConn, "(%s) Link was previously detached. Attempting quick reconnect to recover from error: %s", operation, err.Error()) + links.Writef(exported.EventConn, "(%s) Link was previously detached. Attempting quick reconnect to recover from error: %s", operation, err.Error()) didQuickRetry = true args.ResetAttempts() } @@ -387,29 +399,29 @@ func (l *AMQPLinksImpl) Close(ctx context.Context, permanent bool) error { // eats the cost of recovery, instead of doing it immediately. This is useful // if you're trying to exit out of a function quickly but still need to react // to a returned error. 
-func (l *AMQPLinksImpl) CloseIfNeeded(ctx context.Context, err error) RecoveryKind { - l.mu.Lock() - defer l.mu.Unlock() +func (links *AMQPLinksImpl) CloseIfNeeded(ctx context.Context, err error) RecoveryKind { + links.mu.Lock() + defer links.mu.Unlock() if IsCancelError(err) { - log.Writef(exported.EventConn, "[%s] No close needed for cancellation", l.name) + links.Writef(exported.EventConn, "No close needed for cancellation") return RecoveryKindNone } - rk := l.getRecoveryKindFunc(err) + rk := links.getRecoveryKindFunc(err) switch rk { case RecoveryKindLink: - log.Writef(exported.EventConn, "[%s] Closing links for error %s", l.name, err.Error()) - _ = l.closeWithoutLocking(ctx, false) + links.Writef(exported.EventConn, "Closing links for error %s", err.Error()) + _ = links.closeWithoutLocking(ctx, false) return rk case RecoveryKindFatal: - log.Writef(exported.EventConn, "[%s] Fatal error cleanup", l.name) + links.Writef(exported.EventConn, "Fatal error cleanup") fallthrough case RecoveryKindConn: - log.Writef(exported.EventConn, "[%s] Closing connection AND links for error %s", l.name, err.Error()) - _ = l.closeWithoutLocking(ctx, false) - _ = l.ns.Close(false) + links.Writef(exported.EventConn, "Closing connection AND links for error %s", err.Error()) + _ = links.closeWithoutLocking(ctx, false) + _ = links.ns.Close(false) return rk case RecoveryKindNone: return rk @@ -419,46 +431,46 @@ func (l *AMQPLinksImpl) CloseIfNeeded(ctx context.Context, err error) RecoveryKi } // initWithoutLocking will create a new link, unconditionally. 
-func (l *AMQPLinksImpl) initWithoutLocking(ctx context.Context) error { - tmpCancelAuthRefreshLink, _, err := l.ns.NegotiateClaim(ctx, l.entityPath) +func (links *AMQPLinksImpl) initWithoutLocking(ctx context.Context) error { + tmpCancelAuthRefreshLink, _, err := links.ns.NegotiateClaim(ctx, links.entityPath) if err != nil { - if err := l.closeWithoutLocking(ctx, false); err != nil { - log.Writef(exported.EventConn, "Failure during link cleanup after negotiateClaim: %s", err.Error()) + if err := links.closeWithoutLocking(ctx, false); err != nil { + links.Writef(exported.EventConn, "Failure during link cleanup after negotiateClaim: %s", err.Error()) } return err } - l.cancelAuthRefreshLink = tmpCancelAuthRefreshLink + links.cancelAuthRefreshLink = tmpCancelAuthRefreshLink - tmpCancelAuthRefreshMgmtLink, _, err := l.ns.NegotiateClaim(ctx, l.managementPath) + tmpCancelAuthRefreshMgmtLink, _, err := links.ns.NegotiateClaim(ctx, links.managementPath) if err != nil { - if err := l.closeWithoutLocking(ctx, false); err != nil { - log.Writef(exported.EventConn, "Failure during link cleanup after negotiate claim for mgmt link: %s", err.Error()) + if err := links.closeWithoutLocking(ctx, false); err != nil { + links.Writef(exported.EventConn, "Failure during link cleanup after negotiate claim for mgmt link: %s", err.Error()) } return err } - l.cancelAuthRefreshMgmtLink = tmpCancelAuthRefreshMgmtLink + links.cancelAuthRefreshMgmtLink = tmpCancelAuthRefreshMgmtLink - tmpSession, cr, err := l.ns.NewAMQPSession(ctx) + tmpSession, cr, err := links.ns.NewAMQPSession(ctx) if err != nil { - if err := l.closeWithoutLocking(ctx, false); err != nil { - log.Writef(exported.EventConn, "Failure during link cleanup after creating AMQP session: %s", err.Error()) + if err := links.closeWithoutLocking(ctx, false); err != nil { + links.Writef(exported.EventConn, "Failure during link cleanup after creating AMQP session: %s", err.Error()) } return err } - l.session = tmpSession - l.id.Conn = cr 
+ links.session = tmpSession + links.id.Conn = cr - tmpSender, tmpReceiver, err := l.createLink(ctx, l.session) + tmpSender, tmpReceiver, err := links.createLink(ctx, links.session) if err != nil { - if err := l.closeWithoutLocking(ctx, false); err != nil { - log.Writef(exported.EventConn, "Failure during link cleanup after creating link: %s", err.Error()) + if err := links.closeWithoutLocking(ctx, false); err != nil { + links.Writef(exported.EventConn, "Failure during link cleanup after creating link: %s", err.Error()) } return err } @@ -467,28 +479,29 @@ func (l *AMQPLinksImpl) initWithoutLocking(ctx context.Context) error { panic("Both tmpReceiver and tmpSender are nil") } - l.Sender, l.Receiver = tmpSender, tmpReceiver + links.Sender, links.Receiver = tmpSender, tmpReceiver - tmpRPCLink, err := l.ns.NewRPCLink(ctx, l.ManagementPath()) + tmpRPCLink, err := links.ns.NewRPCLink(ctx, links.ManagementPath()) if err != nil { - if err := l.closeWithoutLocking(ctx, false); err != nil { - log.Writef("Failure during link cleanup after creating mgmt client: %s", err.Error()) + if err := links.closeWithoutLocking(ctx, false); err != nil { + links.Writef(exported.EventConn, "Failure during link cleanup after creating mgmt client: %s", err.Error()) } return err } - l.RPCLink = tmpRPCLink - l.id.Link++ + links.RPCLink = tmpRPCLink + links.id.Link++ - if l.Sender != nil { - linkName := l.Sender.LinkName() - l.name = fmt.Sprintf("c:%d, l:%d, s:name:%s", l.id.Conn, l.id.Link, linkName) - } else if l.Receiver != nil { - l.name = fmt.Sprintf("c:%d, l:%d, r:name:%s", l.id.Conn, l.id.Link, l.Receiver.LinkName()) + if links.Sender != nil { + linkName := links.Sender.LinkName() + links.SetPrefix("c:%d, l:%d, s:name:%0.6s", links.id.Conn, links.id.Link, linkName) + } else if links.Receiver != nil { + linkName := links.Receiver.LinkName() + links.SetPrefix("c:%d, l:%d, r:name:%0.6s", links.id.Conn, links.id.Link, linkName) } - log.Writef(exported.EventConn, "[%s] Links created", l.name) 
+ links.Writef(exported.EventConn, "Links created") return nil } @@ -502,39 +515,39 @@ func (l *AMQPLinksImpl) initWithoutLocking(ctx context.Context) error { // Regardless of cancellation or Close() call failures, all local state will be cleaned up. // // NOTE: No locking is done in this function, call `Close` if you require locking. -func (l *AMQPLinksImpl) closeWithoutLocking(ctx context.Context, permanent bool) error { - if l.closedPermanently { +func (links *AMQPLinksImpl) closeWithoutLocking(ctx context.Context, permanent bool) error { + if links.closedPermanently { return nil } - log.Writef(exported.EventConn, "[%s] Links closing (permanent: %v)", l.name, permanent) + links.Writef(exported.EventConn, "Links closing (permanent: %v)", permanent) defer func() { if permanent { - l.closedPermanently = true + links.closedPermanently = true } }() var messages []string - if l.cancelAuthRefreshLink != nil { - l.cancelAuthRefreshLink() - l.cancelAuthRefreshLink = nil + if links.cancelAuthRefreshLink != nil { + links.cancelAuthRefreshLink() + links.cancelAuthRefreshLink = nil } - if l.cancelAuthRefreshMgmtLink != nil { - l.cancelAuthRefreshMgmtLink() - l.cancelAuthRefreshMgmtLink = nil + if links.cancelAuthRefreshMgmtLink != nil { + links.cancelAuthRefreshMgmtLink() + links.cancelAuthRefreshMgmtLink = nil } closeables := []struct { name string instance amqpwrap.Closeable }{ - {"Sender", l.Sender}, - {"Receiver", l.Receiver}, - {"Session", l.session}, - {"RPC", l.RPCLink}, + {"Sender", links.Sender}, + {"Receiver", links.Receiver}, + {"Session", links.session}, + {"RPC", links.RPCLink}, } wasCancelled := false @@ -546,6 +559,8 @@ func (l *AMQPLinksImpl) closeWithoutLocking(ctx context.Context, permanent bool) continue } + links.Writef(exported.EventConn, "Closing %s", c.name) + if err := c.instance.Close(ctx); err != nil { if IsCancelError(err) { wasCancelled = true @@ -555,7 +570,7 @@ func (l *AMQPLinksImpl) closeWithoutLocking(ctx context.Context, permanent bool) } } 
- l.Sender, l.Receiver, l.session, l.RPCLink = nil, nil, nil, nil + links.Sender, links.Receiver, links.session, links.RPCLink = nil, nil, nil, nil if wasCancelled { return ctx.Err() diff --git a/sdk/messaging/azservicebus/internal/amqp_test_utils.go b/sdk/messaging/azservicebus/internal/amqp_test_utils.go index 5b4680769ae2..ef336e3f2650 100644 --- a/sdk/messaging/azservicebus/internal/amqp_test_utils.go +++ b/sdk/messaging/azservicebus/internal/amqp_test_utils.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" @@ -207,6 +208,14 @@ func (l *FakeAMQPLinks) Retry(ctx context.Context, eventName log.Event, operatio return fn(ctx, lwr, &utils.RetryFnArgs{}) } +func (l *FakeAMQPLinks) Writef(evt azlog.Event, format string, args ...any) { + log.Writef(evt, "[prefix] "+format, args...) +} + +func (l *FakeAMQPLinks) Prefix() string { + return "prefix" +} + func (l *FakeAMQPLinks) Close(ctx context.Context, permanently bool) error { if permanently { l.permanently = true diff --git a/sdk/messaging/azservicebus/internal/amqplinks_unit_test.go b/sdk/messaging/azservicebus/internal/amqplinks_unit_test.go index 5fdd1f3b894a..70e06658f287 100644 --- a/sdk/messaging/azservicebus/internal/amqplinks_unit_test.go +++ b/sdk/messaging/azservicebus/internal/amqplinks_unit_test.go @@ -88,7 +88,7 @@ func TestAMQPLinksRetriesUnit(t *testing.T) { logMessages := endLogging() if testData.ExpectReset { - require.Contains(t, logMessages, fmt.Sprintf("[azsb.Conn] (OverallOperation) Link was previously detached. 
Attempting quick reconnect to recover from error: %s", err.Error())) + require.Contains(t, logMessages, fmt.Sprintf("[azsb.Conn] [c:100, l:1, s:name:sender] (OverallOperation) Link was previously detached. Attempting quick reconnect to recover from error: %s", err.Error())) } else { for _, msg := range logMessages { require.NotContains(t, msg, "Link was previously detached") @@ -126,11 +126,11 @@ func TestAMQPLinks_Logging(t *testing.T) { actualLogs := endCapture() expectedLogs := []string{ - "[azsb.Conn] [] Recovering link for error amqp: link closed", + "[azsb.Conn] Recovering link for error amqp: link closed", "[azsb.Conn] Recovering link only", - "[azsb.Conn] [] Links closing (permanent: false)", - "[azsb.Conn] [c:100, l:1, r:name:fakelink] Links created", - "[azsb.Conn] [c:100, l:1, r:name:fakelink] Recovered links"} + "[azsb.Conn] Links closing (permanent: false)", + "[azsb.Conn] [c:100, l:1, r:name:fakeli] Links created", + "[azsb.Conn] [c:100, l:1, r:name:fakeli] Recovered links (old: )"} require.Equal(t, expectedLogs, actualLogs) }) @@ -161,14 +161,14 @@ func TestAMQPLinks_Logging(t *testing.T) { actualLogs := endCapture() expectedLogs := []string{ - "[azsb.Conn] [] Recovering link for error amqp: connection closed", + "[azsb.Conn] Recovering link for error amqp: connection closed", "[azsb.Conn] Recovering connection (and links)", "[azsb.Conn] closing old link: current:{0 0}, old:{0 0}", - "[azsb.Conn] [] Links closing (permanent: false)", + "[azsb.Conn] Links closing (permanent: false)", "[azsb.Conn] recreating link: c: true, current:{0 0}, old:{0 0}", - "[azsb.Conn] [] Links closing (permanent: false)", - "[azsb.Conn] [c:101, l:1, r:name:fakelink] Links created", - "[azsb.Conn] [c:101, l:1, r:name:fakelink] Recovered connection and links"} + "[azsb.Conn] Links closing (permanent: false)", + "[azsb.Conn] [c:101, l:1, r:name:fakeli] Links created", + "[azsb.Conn] [c:101, l:1, r:name:fakeli] Recovered connection and links (old: )"} require.Equal(t, 
expectedLogs, actualLogs) }) diff --git a/sdk/messaging/azservicebus/internal/rpc.go b/sdk/messaging/azservicebus/internal/rpc.go index a69c37c4a248..2dbb180b54a1 100644 --- a/sdk/messaging/azservicebus/internal/rpc.go +++ b/sdk/messaging/azservicebus/internal/rpc.go @@ -148,13 +148,10 @@ func NewRPCLink(ctx context.Context, args RPCLinkArgs) (amqpwrap.RPCLink, error) return link, nil } -const responseRouterShutdownMessage = "Response router has shut down" - // responseRouter is responsible for taking any messages received on the 'response' // link and forwarding it to the proper channel. The channel is being select'd by the // original `RPC` call. func (l *rpcLink) responseRouter() { - defer azlog.Writef(l.logEvent, responseRouterShutdownMessage) defer close(l.responseRouterClosed) for { diff --git a/sdk/messaging/azservicebus/internal/rpc_test.go b/sdk/messaging/azservicebus/internal/rpc_test.go index 4bf5a2865278..6ca286c85d9a 100644 --- a/sdk/messaging/azservicebus/internal/rpc_test.go +++ b/sdk/messaging/azservicebus/internal/rpc_test.go @@ -8,7 +8,6 @@ import ( "errors" "net" "testing" - "time" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" @@ -36,10 +35,6 @@ func TestRPCLinkNonErrorRequiresRecovery(t *testing.T) { defer func() { require.NoError(t, link.Close(context.Background())) }() - messagesCh := make(chan string, 10000) - endCapture := test.CaptureLogsForTestWithChannel(messagesCh, false) - defer endCapture() - responses := []*rpcTestResp{ // this error requires recovery (in this case, connection but there's no // distinction between types in RPCLink) @@ -53,23 +48,11 @@ func TestRPCLinkNonErrorRequiresRecovery(t *testing.T) { }) require.Nil(t, resp) - // (give the response router a teeny bit to shut down) - time.Sleep(500 * time.Millisecond) + linkImpl := link.(*rpcLink) + <-linkImpl.responseRouterClosed var netOpError 
net.Error require.ErrorAs(t, err, &netOpError) - -LogLoop: - for { - select { - case msg := <-messagesCh: - if msg == "[rpctesting] "+responseRouterShutdownMessage { - break LogLoop - } - default: - require.Fail(t, "RPC router never shut down") - } - } } func TestRPCLinkNonErrorRequiresNoRecovery(t *testing.T) { diff --git a/sdk/messaging/azservicebus/internal/stress/.gitignore b/sdk/messaging/azservicebus/internal/stress/.gitignore index fcef718cad3c..099d255e217d 100644 --- a/sdk/messaging/azservicebus/internal/stress/.gitignore +++ b/sdk/messaging/azservicebus/internal/stress/.gitignore @@ -3,4 +3,4 @@ stress.exe logs generatedValues.yaml deploy-test-*.ps1 - +*.log diff --git a/sdk/messaging/azservicebus/internal/stress/.helmignore b/sdk/messaging/azservicebus/internal/stress/.helmignore index 43294410b6b0..d7c1ed9789e0 100644 --- a/sdk/messaging/azservicebus/internal/stress/.helmignore +++ b/sdk/messaging/azservicebus/internal/stress/.helmignore @@ -3,3 +3,4 @@ stress.exe .env Dockerfile *.go +*.log diff --git a/sdk/messaging/azservicebus/internal/stress/deploy.ps1 b/sdk/messaging/azservicebus/internal/stress/deploy.ps1 index 8369fb669503..e1026202066f 100644 --- a/sdk/messaging/azservicebus/internal/stress/deploy.ps1 +++ b/sdk/messaging/azservicebus/internal/stress/deploy.ps1 @@ -1,2 +1,25 @@ Set-Location $PSScriptRoot + +function deployUsingLocalAddons() { + $azureSDKToolsRoot="" + $stressTestAddonsFolder = "$azureSDKToolsRoot/tools/stress-cluster/cluster/kubernetes/stress-test-addons" + $clusterResourceGroup = "" + $clusterSubscription = "" + $helmEnv = "pg2" + + if (-not (Get-ChildItem $stressTestAddonsFolder)) { + Write-Host "Can't find the the new stress test adons folder at $stressTestAddonsFolder" + return + } + + pwsh "$azureSDKToolsRoot/eng/common/scripts/stress-testing/deploy-stress-tests.ps1" ` + -LocalAddonsPath "$stressTestAddonsFolder" ` + -clusterGroup "$clusterResourceGroup" ` + -subscription "$clusterSubscription" ` + -Environment $helmEnv ` + 
-Login ` + -PushImages +} + +#deployUsingLocalAddons pwsh "../../../../../eng/common/scripts/stress-testing/deploy-stress-tests.ps1" -Login -PushImages @args diff --git a/sdk/messaging/azservicebus/internal/stress/scenarios-matrix.yaml b/sdk/messaging/azservicebus/internal/stress/scenarios-matrix.yaml index bfca4a7af5f7..4c095d870e01 100644 --- a/sdk/messaging/azservicebus/internal/stress/scenarios-matrix.yaml +++ b/sdk/messaging/azservicebus/internal/stress/scenarios-matrix.yaml @@ -12,46 +12,46 @@ matrix: scenarios: constantDetach: testTarget: constantDetach - memory: "1.5Gi" + memory: "0.5Gi" constantDetachmentSender: testTarget: constantDetachmentSender - memory: "1.5Gi" + memory: "0.5Gi" emptySessions: testTarget: emptySessions memory: "1.0Gi" finitePeeks: testTarget: finitePeeks - memory: "1.5Gi" + memory: "0.5Gi" finiteSendAndReceive: testTarget: finiteSendAndReceive - memory: "1.5Gi" + memory: "0.5Gi" finiteSessions: testTarget: finiteSessions memory: "4Gi" idleFastReconnect: testTarget: idleFastReconnect - memory: "1.5Gi" + memory: "0.5Gi" infiniteSendAndReceive: testTarget: infiniteSendAndReceive - memory: "1.5Gi" + memory: "0.5Gi" infiniteSendAndReceiveWithChaos: testTarget: infiniteSendAndReceive # this value is injected as a label value in templates/deploy-job.yaml # this'll activate our standard chaos policy, which is at the bottom of that file. 
chaos: "true" - memory: "1.5Gi" + memory: "0.5Gi" longRunningRenewLock: testTarget: longRunningRenewLock - memory: "1.5Gi" + memory: "0.5Gi" mostlyIdleReceiver: testTarget: mostlyIdleReceiver - memory: "1.5Gi" + memory: "0.5Gi" rapidOpenClose: testTarget: rapidOpenClose - memory: "1.5Gi" + memory: "0.5Gi" receiveCancellation: testTarget: receiveCancellation - memory: "1.5Gi" + memory: "0.5Gi" sendAndReceiveDrain: testTarget: sendAndReceiveDrain - memory: "1.5Gi" + memory: "0.5Gi" diff --git a/sdk/messaging/azservicebus/internal/stress/templates/stress-test-job.yaml b/sdk/messaging/azservicebus/internal/stress/templates/stress-test-job.yaml index a2d1c74d746d..b183e545ffd8 100644 --- a/sdk/messaging/azservicebus/internal/stress/templates/stress-test-job.yaml +++ b/sdk/messaging/azservicebus/internal/stress/templates/stress-test-job.yaml @@ -17,7 +17,7 @@ spec: - > set -ex; mkdir -p "$DEBUG_SHARE"; - /app/stress tests "{{ .Stress.testTarget }}" | tee "${DEBUG_SHARE}/{{ .Stress.Scenario }}.log"; + /app/stress tests "{{ .Stress.testTarget }}" | tee -a "${DEBUG_SHARE}/{{ .Stress.Scenario }}-`date +%s`.log"; # Pulls the image on pod start, always. We tend to push to the same image and tag over and over again # when iterating, so this is a must. imagePullPolicy: Always @@ -29,7 +29,7 @@ spec: resources: limits: memory: {{.Stress.memory }} - cpu: "1" + cpu: "0.5" {{- include "stress-test-addons.container-env" . 
| nindent 6 }} {{- end -}} diff --git a/sdk/messaging/azservicebus/internal/stress/tests/finite_peeks.go b/sdk/messaging/azservicebus/internal/stress/tests/finite_peeks.go index ca0dc64ae33c..ba8735bf536c 100644 --- a/sdk/messaging/azservicebus/internal/stress/tests/finite_peeks.go +++ b/sdk/messaging/azservicebus/internal/stress/tests/finite_peeks.go @@ -28,11 +28,13 @@ func FinitePeeks(remainingArgs []string) { sender, err := client.NewSender(queueName, nil) sc.PanicOnError("failed to create sender", err) + log.Printf("Sending a single message") err = sender.SendMessage(sc.Context, &azservicebus.Message{ Body: []byte("peekable message"), }, nil) sc.PanicOnError("failed to send message", err) + log.Printf("Closing sender") _ = sender.Close(sc.Context) receiver, err := client.NewReceiverForQueue(queueName, nil) @@ -51,9 +53,13 @@ func FinitePeeks(remainingArgs []string) { sc.PanicOnError("failed to abandon message", receiver.AbandonMessage(sc.Context, tmp[0], nil)) - for i := 0; i < 10000; i++ { - log.Printf("Sleeping for 1 second before iteration %d", i) - time.Sleep(time.Second) + const maxPeeks = 10000 + const peekSleep = 500 * time.Millisecond + + log.Printf("Now peeking %d times, every %dms", maxPeeks, peekSleep/time.Millisecond) + + for i := 1; i <= maxPeeks; i++ { + time.Sleep(peekSleep) seqNum := int64(0) @@ -65,4 +71,6 @@ func FinitePeeks(remainingArgs []string) { receiverStats.AddReceived(int32(1)) } + + log.Printf("Done, peeked %d times", maxPeeks) } diff --git a/sdk/messaging/azservicebus/internal/stress/tests/mostly_idle_receiver.go b/sdk/messaging/azservicebus/internal/stress/tests/mostly_idle_receiver.go index d707f1edd747..1d9b40c23c1d 100644 --- a/sdk/messaging/azservicebus/internal/stress/tests/mostly_idle_receiver.go +++ b/sdk/messaging/azservicebus/internal/stress/tests/mostly_idle_receiver.go @@ -83,7 +83,7 @@ func MostlyIdleReceiver(remainingArgs []string) { messages, err := receiver.ReceiveMessages(sc.Context, 1, nil) 
sc.PanicOnError(fmt.Sprintf("failed receiving messages for duration %s", duration), err) - log.Printf("Received messages %#v", messages) + log.Printf("Received %d messages", len(messages)) stats.AddReceived(int32(len(messages))) for _, msg := range messages { diff --git a/sdk/messaging/azservicebus/internal/test/test_helpers.go b/sdk/messaging/azservicebus/internal/test/test_helpers.go index 7a1fb62812cc..49636ec457ed 100644 --- a/sdk/messaging/azservicebus/internal/test/test_helpers.go +++ b/sdk/messaging/azservicebus/internal/test/test_helpers.go @@ -67,6 +67,31 @@ func GetConnectionStringListenOnly(t *testing.T) string { return getEnvOrSkipTest(t, "SERVICEBUS_CONNECTION_STRING_LISTEN_ONLY") } +func GetIdentityVars(t *testing.T) *struct { + TenantID string + ClientID string + Secret string + Endpoint string +} { + runningLiveTest := GetConnectionString(t) != "" + + if !runningLiveTest { + return nil + } + + return &struct { + TenantID string + ClientID string + Secret string + Endpoint string + }{ + TenantID: getEnvOrSkipTest(t, "AZURE_TENANT_ID"), + ClientID: getEnvOrSkipTest(t, "AZURE_CLIENT_ID"), + Endpoint: getEnvOrSkipTest(t, "SERVICEBUS_ENDPOINT"), + Secret: getEnvOrSkipTest(t, "AZURE_CLIENT_SECRET"), + } +} + func getEnvOrSkipTest(t *testing.T, name string) string { cs := os.Getenv(name) diff --git a/sdk/messaging/azservicebus/internal/utils/logger.go b/sdk/messaging/azservicebus/internal/utils/logger.go new file mode 100644 index 000000000000..7298427e0251 --- /dev/null +++ b/sdk/messaging/azservicebus/internal/utils/logger.go @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package utils + +import ( + "fmt" + "sync/atomic" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Logger struct { + prefix *atomic.Value +} + +func NewLogger() Logger { + value := &atomic.Value{} + value.Store("") + + return Logger{ + prefix: value, + } +} + +func (l *Logger) SetPrefix(format string, args ...any) { + l.prefix.Store(fmt.Sprintf("["+format+"] ", args...)) +} + +func (l *Logger) Prefix() string { + return l.prefix.Load().(string) +} + +func (l *Logger) Writef(evt azlog.Event, format string, args ...any) { + prefix := l.prefix.Load().(string) + azlog.Writef(evt, prefix+format, args...) +} diff --git a/sdk/messaging/azservicebus/internal/utils/retrier.go b/sdk/messaging/azservicebus/internal/utils/retrier.go index 53d03a68202e..5fe3434d7842 100644 --- a/sdk/messaging/azservicebus/internal/utils/retrier.go +++ b/sdk/messaging/azservicebus/internal/utils/retrier.go @@ -50,7 +50,7 @@ func Retry(ctx context.Context, eventName log.Event, operation string, fn func(c for i := int32(0); i <= ro.MaxRetries; i++ { if i > 0 { sleep := calcDelay(ro, i) - log.Writef(eventName, "(%s) Retry attempt %d sleeping for %s", operation, i, sleep) + log.Writef(eventName, "%s Retry attempt %d sleeping for %s", operation, i, sleep) select { case <-ctx.Done(): @@ -66,7 +66,7 @@ func Retry(ctx context.Context, eventName log.Event, operation string, fn func(c err = fn(ctx, &args) if args.resetAttempts { - log.Writef(eventName, "(%s) Resetting retry attempts", operation) + log.Writef(eventName, "%s Resetting retry attempts", operation) // it looks weird, but we're doing -1 here because the post-increment // will set it back to 0, which is what we want - go back to the 0th @@ -79,13 +79,13 @@ func Retry(ctx context.Context, eventName log.Event, operation string, fn func(c if err != nil { if isFatalFn(err) { if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - log.Writef(eventName, "(%s) Retry attempt %d was 
cancelled, stopping: %s", operation, i, err.Error()) + log.Writef(eventName, "%s Retry attempt %d was cancelled, stopping: %s", operation, i, err.Error()) } else { - log.Writef(eventName, "(%s) Retry attempt %d returned non-retryable error: %s", operation, i, err.Error()) + log.Writef(eventName, "%s Retry attempt %d returned non-retryable error: %s", operation, i, err.Error()) } return err } else { - log.Writef(eventName, "(%s) Retry attempt %d returned retryable error: %s", operation, i, err.Error()) + log.Writef(eventName, "%s Retry attempt %d returned retryable error: %s", operation, i, err.Error()) } continue diff --git a/sdk/messaging/azservicebus/internal/utils/retrier_test.go b/sdk/messaging/azservicebus/internal/utils/retrier_test.go index c316fcdf23a1..4c7282a5e897 100644 --- a/sdk/messaging/azservicebus/internal/utils/retrier_test.go +++ b/sdk/messaging/azservicebus/internal/utils/retrier_test.go @@ -294,7 +294,7 @@ func TestRetryLogging(t *testing.T) { t.Run("normal error", func(t *testing.T) { logsFn := test.CaptureLogsForTest(false) - err := Retry(context.Background(), testLogEvent, "my_operation", func(ctx context.Context, args *RetryFnArgs) error { + err := Retry(context.Background(), testLogEvent, "(my_operation)", func(ctx context.Context, args *RetryFnArgs) error { azlog.Writef("TestFunc", "Attempt %d, within test func, returning error hello", args.I) return errors.New("hello") }, func(err error) bool { @@ -325,7 +325,7 @@ func TestRetryLogging(t *testing.T) { t.Run("normal error2", func(t *testing.T) { test.EnableStdoutLogging(t) - err := Retry(context.Background(), testLogEvent, "my_operation", func(ctx context.Context, args *RetryFnArgs) error { + err := Retry(context.Background(), testLogEvent, "(my_operation)", func(ctx context.Context, args *RetryFnArgs) error { azlog.Writef("TestFunc", "Attempt %d, within test func, returning error hello", args.I) return errors.New("hello") }, func(err error) bool { @@ -339,7 +339,7 @@ func 
TestRetryLogging(t *testing.T) { t.Run("cancellation error", func(t *testing.T) { logsFn := test.CaptureLogsForTest(false) - err := Retry(context.Background(), testLogEvent, "test_operation", func(ctx context.Context, args *RetryFnArgs) error { + err := Retry(context.Background(), testLogEvent, "(test_operation)", func(ctx context.Context, args *RetryFnArgs) error { azlog.Writef("TestFunc", "Attempt %d, within test func", args.I) return context.Canceled @@ -359,7 +359,7 @@ func TestRetryLogging(t *testing.T) { t.Run("custom fatal error", func(t *testing.T) { logsFn := test.CaptureLogsForTest(false) - err := Retry(context.Background(), testLogEvent, "test_operation", func(ctx context.Context, args *RetryFnArgs) error { + err := Retry(context.Background(), testLogEvent, "(test_operation)", func(ctx context.Context, args *RetryFnArgs) error { azlog.Writef("TestFunc", "Attempt %d, within test func", args.I) return errors.New("custom fatal error") @@ -380,7 +380,7 @@ func TestRetryLogging(t *testing.T) { logsFn := test.CaptureLogsForTest(false) reset := false - err := Retry(context.Background(), testLogEvent, "test_operation", func(ctx context.Context, args *RetryFnArgs) error { + err := Retry(context.Background(), testLogEvent, "(test_operation)", func(ctx context.Context, args *RetryFnArgs) error { azlog.Writef("TestFunc", "Attempt %d, within test func", args.I) if !reset { diff --git a/sdk/messaging/azservicebus/receiver.go b/sdk/messaging/azservicebus/receiver.go index 59b5b48db698..240fd4b2924b 100644 --- a/sdk/messaging/azservicebus/receiver.go +++ b/sdk/messaging/azservicebus/receiver.go @@ -11,7 +11,6 @@ import ( "sync/atomic" "time" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" @@ -321,7 
+320,7 @@ func (r *Receiver) RenewMessageLock(ctx context.Context, msg *ReceivedMessage, o func (r *Receiver) Close(ctx context.Context) error { cancelReleaser := r.cancelReleaser.Swap(emptyCancelFn).(func() string) releaserID := cancelReleaser() - log.Writef(EventReceiver, "Stopped message releaser with ID '%s'", releaserID) + r.amqpLinks.Writef(EventReceiver, "Stopped message releaser with ID '%s'", releaserID) r.cleanupOnClose() return r.amqpLinks.Close(ctx, true) @@ -388,21 +387,20 @@ func (r *Receiver) receiveMessagesImpl(ctx context.Context, maxMessages int, opt // might have exited before all credits were used up. currentReceiverCredits := int64(linksWithID.Receiver.Credits()) creditsToIssue := int64(maxMessages) - currentReceiverCredits - log.Writef(EventReceiver, "Asking for %d credits", maxMessages) if creditsToIssue > 0 { - log.Writef(EventReceiver, "Only need to issue %d additional credits", creditsToIssue) + r.amqpLinks.Writef(EventReceiver, "Issuing %d credits, have %d", creditsToIssue, currentReceiverCredits) if err := linksWithID.Receiver.IssueCredit(uint32(creditsToIssue)); err != nil { return nil, err } } else { - log.Writef(EventReceiver, "No additional credits needed, still have %d credits active", currentReceiverCredits) + r.amqpLinks.Writef(EventReceiver, "Have %d credits, no new credits needed", currentReceiverCredits) } result := r.fetchMessages(ctx, linksWithID.Receiver, maxMessages, r.defaultTimeAfterFirstMsg) - log.Writef(EventReceiver, "Received %d/%d messages", len(result.Messages), maxMessages) + r.amqpLinks.Writef(EventReceiver, "Received %d/%d messages", len(result.Messages), maxMessages) // this'll only close anything if the error indicates that the link/connection is bad. // it's safe to call with cancellation errors. 
@@ -417,7 +415,7 @@ func (r *Receiver) receiveMessagesImpl(ctx context.Context, maxMessages int, opt releaserFunc := r.newReleaserFunc(linksWithID.Receiver) go releaserFunc() } else { - log.Writef(EventReceiver, "Failure when receiving messages: %s", result.Error) + r.amqpLinks.Writef(EventReceiver, "Failure when receiving messages: %s", result.Error) } // If the user does get some messages we ignore 'error' and return only the messages. @@ -616,8 +614,6 @@ func (r *Receiver) newReleaserFunc(receiver amqpwrap.AMQPReceiver) func() { return func() { defer close(done) - log.Writef(EventReceiver, "[%s] Message releaser starting...", receiver.LinkName()) - for { // we might not have all the messages we need here. msg, err := receiver.Receive(ctx, nil) @@ -631,10 +627,12 @@ func (r *Receiver) newReleaserFunc(receiver amqpwrap.AMQPReceiver) func() { } if internal.IsCancelError(err) { - log.Writef(exported.EventReceiver, "[%s] Message releaser pausing. Released %d messages", receiver.LinkName(), released) + if released > 0 { + r.amqpLinks.Writef(exported.EventReceiver, "Message releaser pausing. Released %d messages", released) + } break } else if internal.GetRecoveryKind(err) != internal.RecoveryKindNone { - log.Writef(exported.EventReceiver, "[%s] Message releaser stopping because of link failure. Released %d messages. Will start again after next receive: %s", receiver.LinkName(), released, err) + r.amqpLinks.Writef(exported.EventReceiver, "Message releaser stopping because of link failure. Released %d messages. 
Will start again after next receive: %s", released, err) break } } diff --git a/sdk/messaging/azservicebus/receiver_simulated_test.go b/sdk/messaging/azservicebus/receiver_simulated_test.go index f61cd95878b5..18f7210aacda 100644 --- a/sdk/messaging/azservicebus/receiver_simulated_test.go +++ b/sdk/messaging/azservicebus/receiver_simulated_test.go @@ -632,7 +632,9 @@ func TestReceiver_CreditsDontExceedMax(t *testing.T) { messages, err = receiver.ReceiveMessages(baseReceiveCtx, 5000, nil) require.NoError(t, err) require.Equal(t, []string{"hello world"}, getSortedBodies(messages)) - require.Contains(t, logsFn(), "[azsb.Receiver] No additional credits needed, still have 5000 credits active") + logs := logsFn() + + require.Contains(t, logs, "[azsb.Receiver] [c:1, l:1, r:name:c:001|] Have 5000 credits, no new credits needed") ctx, cancel = context.WithTimeout(baseReceiveCtx, time.Second) defer cancel() @@ -644,7 +646,7 @@ func TestReceiver_CreditsDontExceedMax(t *testing.T) { messages, err = receiver.ReceiveMessages(ctx, 5000, nil) require.ErrorIs(t, err, context.DeadlineExceeded) require.Empty(t, messages) - require.Contains(t, logsFn(), "[azsb.Receiver] Only need to issue 1 additional credits") + require.Contains(t, logsFn(), "[azsb.Receiver] [c:1, l:1, r:name:c:001|] Issuing 1 credits, have 4999") require.Equal(t, 1, len(md.Events.GetOpenConns())) require.Equal(t, 3+3, len(md.Events.GetOpenLinks()), "Sender and Receiver each own 3 links apiece ($mgmt, actual link)") diff --git a/sdk/messaging/azservicebus/receiver_test.go b/sdk/messaging/azservicebus/receiver_test.go index 640a6abe651e..0d691447c9cc 100644 --- a/sdk/messaging/azservicebus/receiver_test.go +++ b/sdk/messaging/azservicebus/receiver_test.go @@ -6,7 +6,7 @@ package azservicebus import ( "context" "fmt" - "os" + "regexp" "sort" "strings" "testing" @@ -569,8 +569,11 @@ func TestReceiver_RenewMessageLock(t *testing.T) { logMessages := endCaptureFn() failedOnFirstTry := false + + re := 
regexp.MustCompile(`^\[azsb.Receiver\] \[c:1, l:1, r:name:[^\]]+\] \(renewMessageLock\) Retry attempt 0 returned non-retryable error`) + for _, msg := range logMessages { - if strings.HasPrefix(msg, "[azsb.Receiver] (renewMessageLock) Retry attempt 0 returned non-retryable error") { + if re.MatchString(msg) { failedOnFirstTry = true } } @@ -866,14 +869,15 @@ func TestReceiverUnauthorizedCreds(t *testing.T) { }) t.Run("invalid identity creds", func(t *testing.T) { - tenantID := os.Getenv("AZURE_TENANT_ID") - clientID := os.Getenv("AZURE_CLIENT_ID") - endpoint := os.Getenv("SERVICEBUS_ENDPOINT") + identityVars := test.GetIdentityVars(t) + if identityVars == nil { + return + } - cliCred, err := azidentity.NewClientSecretCredential(tenantID, clientID, "bogus-client-secret", nil) + cliCred, err := azidentity.NewClientSecretCredential(identityVars.TenantID, identityVars.ClientID, "bogus-client-secret", nil) require.NoError(t, err) - client, err := NewClient(endpoint, cliCred, nil) + client, err := NewClient(identityVars.Endpoint, cliCred, nil) require.NoError(t, err) defer test.RequireClose(t, client) diff --git a/sdk/messaging/azservicebus/receiver_unit_test.go b/sdk/messaging/azservicebus/receiver_unit_test.go index d06f86f5ddf4..50bf141d70f0 100644 --- a/sdk/messaging/azservicebus/receiver_unit_test.go +++ b/sdk/messaging/azservicebus/receiver_unit_test.go @@ -183,9 +183,11 @@ func TestReceiver_releaserFunc(t *testing.T) { <-receiverClosed t.Logf("Receiver has closed") + logs := logsFn() + require.Contains(t, - logsFn(), - fmt.Sprintf("[azsb.Receiver] [fakelink] Message releaser pausing. Released %d messages", successfulReleases), + logs, + fmt.Sprintf("[azsb.Receiver] [prefix] Message releaser pausing. Released %d messages", successfulReleases), ) } @@ -224,7 +226,7 @@ func TestReceiver_releaserFunc_errorOnFirstMessage(t *testing.T) { require.Contains(t, logsFn(), - fmt.Sprintf("[azsb.Receiver] [fakelink] Message releaser stopping because of link failure. 
Released 0 messages. Will start again after next receive: %s", &amqp.LinkError{})) + fmt.Sprintf("[azsb.Receiver] Message releaser stopping because of link failure. Released 0 messages. Will start again after next receive: %s", &amqp.LinkError{})) } func TestReceiver_releaserFunc_receiveAndDeleteIsNoop(t *testing.T) { From 9111616efbf886bc41c1776ee35b7de041ec3d02 Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Mon, 1 May 2023 12:52:37 -0700 Subject: [PATCH 04/50] update proxy version (#20712) Co-authored-by: Scott Beddall --- eng/common/testproxy/target_version.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eng/common/testproxy/target_version.txt b/eng/common/testproxy/target_version.txt index 5f4443c2a943..2e1adf9ccc4b 100644 --- a/eng/common/testproxy/target_version.txt +++ b/eng/common/testproxy/target_version.txt @@ -1 +1 @@ -1.0.0-dev.20230417.1 +1.0.0-dev.20230427.1 From d6bf19024564205922e0225ba4d70449d7ab0e02 Mon Sep 17 00:00:00 2001 From: Richard Park <51494936+richardpark-msft@users.noreply.github.com> Date: Mon, 1 May 2023 16:58:55 -0700 Subject: [PATCH 05/50] Return an error when you try to send a message that's too large. (#20721) This now works just like the message batch - you'll get an ErrMessageTooLarge if you attempt to send a message that's too large for the link's configured size. NOTE: there's a patch to `internal/go-amqp/Sender.go` to match what's in go-amqp's main so it returns a programmatically useful error when the message is too large. 
Fixes #20647 --- .../azservicebus/internal/go-amqp/sender.go | 10 ++++- sdk/messaging/azservicebus/message_batch.go | 3 +- sdk/messaging/azservicebus/sender.go | 15 +++++-- sdk/messaging/azservicebus/sender_test.go | 44 +++++++++++++++++++ 4 files changed, 66 insertions(+), 6 deletions(-) diff --git a/sdk/messaging/azservicebus/internal/go-amqp/sender.go b/sdk/messaging/azservicebus/internal/go-amqp/sender.go index dcfb4dc557be..afe17e7f8e1c 100644 --- a/sdk/messaging/azservicebus/internal/go-amqp/sender.go +++ b/sdk/messaging/azservicebus/internal/go-amqp/sender.go @@ -101,7 +101,10 @@ func (s *Sender) send(ctx context.Context, msg *Message, opts *SendOptions) (cha maxTransferFrameHeader = 66 // determined by calcMaxTransferFrameHeader ) if len(msg.DeliveryTag) > maxDeliveryTagLength { - return nil, fmt.Errorf("delivery tag is over the allowed %v bytes, len: %v", maxDeliveryTagLength, len(msg.DeliveryTag)) + return nil, &Error{ + Condition: ErrCondMessageSizeExceeded, + Description: fmt.Sprintf("delivery tag is over the allowed %v bytes, len: %v", maxDeliveryTagLength, len(msg.DeliveryTag)), + } } s.mu.Lock() @@ -114,7 +117,10 @@ func (s *Sender) send(ctx context.Context, msg *Message, opts *SendOptions) (cha } if s.l.maxMessageSize != 0 && uint64(s.buf.Len()) > s.l.maxMessageSize { - return nil, fmt.Errorf("encoded message size exceeds max of %d", s.l.maxMessageSize) + return nil, &Error{ + Condition: ErrCondMessageSizeExceeded, + Description: fmt.Sprintf("encoded message size exceeds max of %d", s.l.maxMessageSize), + } } senderSettled := senderSettleModeValue(s.l.senderSettleMode) == SenderSettleModeSettled diff --git a/sdk/messaging/azservicebus/message_batch.go b/sdk/messaging/azservicebus/message_batch.go index fd33a0195ff5..1fde3d4bd761 100644 --- a/sdk/messaging/azservicebus/message_batch.go +++ b/sdk/messaging/azservicebus/message_batch.go @@ -12,7 +12,8 @@ import ( ) // ErrMessageTooLarge is returned when a message cannot fit into a batch when using 
MessageBatch.Add() -var ErrMessageTooLarge = errors.New("the message could not be added because it is too large for the batch") +// or if the message is being sent on its own and is too large for the link. +var ErrMessageTooLarge = errors.New("the message is too large") type ( // MessageBatch represents a batch of messages to send to Service Bus in a single message diff --git a/sdk/messaging/azservicebus/sender.go b/sdk/messaging/azservicebus/sender.go index 1c549c8094df..24e18ab23f58 100644 --- a/sdk/messaging/azservicebus/sender.go +++ b/sdk/messaging/azservicebus/sender.go @@ -5,6 +5,7 @@ package azservicebus import ( "context" + "errors" "time" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal" @@ -33,7 +34,7 @@ type MessageBatchOptions struct { // NewMessageBatch can be used to create a batch that contain multiple // messages. Sending a batch of messages is more efficient than sending the // messages one at a time. -// If the operation fails it can return an *azservicebus.Error type if the failure is actionable. +// If the operation fails it can return an [*azservicebus.Error] type if the failure is actionable. func (s *Sender) NewMessageBatch(ctx context.Context, options *MessageBatchOptions) (*MessageBatch, error) { var batch *MessageBatch @@ -61,7 +62,9 @@ type SendMessageOptions struct { } // SendMessage sends a Message to a queue or topic. -// If the operation fails it can return an *azservicebus.Error type if the failure is actionable. +// If the operation fails it can return: +// - [ErrMessageTooLarge] if the message is larger than the maximum allowed link size. +// - An [*azservicebus.Error] type if the failure is actionable. func (s *Sender) SendMessage(ctx context.Context, message *Message, options *SendMessageOptions) error { return s.sendMessage(ctx, message) } @@ -74,7 +77,9 @@ type SendAMQPAnnotatedMessageOptions struct { // SendAMQPAnnotatedMessage sends an AMQPMessage to a queue or topic. 
// Using an AMQPMessage allows for advanced use cases, like payload encoding, as well as better // interoperability with pure AMQP clients. -// If the operation fails it can return an *azservicebus.Error type if the failure is actionable. +// If the operation fails it can return: +// - [ErrMessageTooLarge] if the message is larger than the maximum allowed link size. +// - An [*azservicebus.Error] type if the failure is actionable. func (s *Sender) SendAMQPAnnotatedMessage(ctx context.Context, message *AMQPAnnotatedMessage, options *SendAMQPAnnotatedMessageOptions) error { return s.sendMessage(ctx, message) } @@ -171,6 +176,10 @@ func (s *Sender) sendMessage(ctx context.Context, message amqpCompatibleMessage) return lwid.Sender.Send(ctx, message.toAMQPMessage(), nil) }, RetryOptions(s.retryOptions)) + if amqpErr := (*amqp.Error)(nil); errors.As(err, &amqpErr) && amqpErr.Condition == amqp.ErrCondMessageSizeExceeded { + return ErrMessageTooLarge + } + return internal.TransformError(err) } diff --git a/sdk/messaging/azservicebus/sender_test.go b/sdk/messaging/azservicebus/sender_test.go index 0c0892a55b28..e3648f8f9148 100644 --- a/sdk/messaging/azservicebus/sender_test.go +++ b/sdk/messaging/azservicebus/sender_test.go @@ -734,3 +734,47 @@ func (rm receivedMessages) Less(i, j int) bool { func (rm receivedMessages) Swap(i, j int) { rm[i], rm[j] = rm[j], rm[i] } + +func Test_Sender_Send_MessageTooBig(t *testing.T) { + client, cleanup, queueName := setupLiveTest(t, &liveTestOptions{ + ClientOptions: &ClientOptions{ + RetryOptions: RetryOptions{ + // This is a purposefully ridiculous wait time but we'll never hit it + // because exceeding the max message size is NOT a retryable error. 
+ RetryDelay: time.Hour, + }, + }, + QueueProperties: &admin.QueueProperties{ + EnablePartitioning: to.Ptr(true), + }}) + defer cleanup() + + sender, err := client.NewSender(queueName, nil) + require.NoError(t, err) + + hugePayload := []byte{} + + for i := 0; i < 1000*1000; i++ { + hugePayload = append(hugePayload, 100) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + err = sender.SendMessage(ctx, &Message{ + MessageID: to.Ptr("message with a message ID"), + Body: hugePayload, + }, nil) + + require.ErrorIs(t, err, ErrMessageTooLarge) + + ctx, cancel = context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + err = sender.SendAMQPAnnotatedMessage(ctx, &AMQPAnnotatedMessage{ + Body: AMQPAnnotatedMessageBody{ + Data: [][]byte{hugePayload}, + }, + }, nil) + + require.ErrorIs(t, err, ErrMessageTooLarge) +} From e2693bd70afbd48ca362673a0ad906cfc2ad9ef7 Mon Sep 17 00:00:00 2001 From: siminsavani-msft <77068571+siminsavani-msft@users.noreply.github.com> Date: Tue, 2 May 2023 12:42:54 -0400 Subject: [PATCH 06/50] Changes in test that is failing in pipeline (#20693) --- sdk/storage/azblob/service/client_test.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/sdk/storage/azblob/service/client_test.go b/sdk/storage/azblob/service/client_test.go index 7fb06d12d6c8..7d9c77090956 100644 --- a/sdk/storage/azblob/service/client_test.go +++ b/sdk/storage/azblob/service/client_test.go @@ -985,13 +985,11 @@ func (s *ServiceUnrecordedTestsSuite) TestServiceSASUploadDownload() { }.SignWithSharedKey(credential) _require.Nil(err) - sasURL := svcClient.URL() - if len(sasURL) > 0 && sasURL[len(sasURL)-1:] != "/" { - sasURL += "/" - } - sasURL += "?" 
+ sasQueryParams.Encode() + srcBlobParts, _ := blob.ParseURL(svcClient.URL()) + srcBlobParts.SAS = sasQueryParams + srcBlobURLWithSAS := srcBlobParts.String() - azClient, err := azblob.NewClientWithNoCredential(sasURL, nil) + azClient, err := azblob.NewClientWithNoCredential(srcBlobURLWithSAS, nil) _require.Nil(err) const blobData = "test data" From 03f0ac33e13a1bfcc57325380994a51df7bb0dcf Mon Sep 17 00:00:00 2001 From: Richard Park <51494936+richardpark-msft@users.noreply.github.com> Date: Tue, 2 May 2023 10:22:57 -0700 Subject: [PATCH 07/50] [azservicebus, azeventhubs] Treat 'entity full' as a fatal error (#20722) When the remote entity is full we get a resource-limit-exceeded condition. This isn't something we should keep retrying on and it's best to just abort and let the user know immediately, rather than hoping it might eventually clear out. This affected both Event Hubs and Service Bus. Fixes #20647 --- sdk/messaging/azeventhubs/CHANGELOG.md | 24 ++-- sdk/messaging/azeventhubs/internal/errors.go | 3 + .../azeventhubs/internal/errors_test.go | 1 + sdk/messaging/azservicebus/CHANGELOG.md | 111 ++++++++++-------- sdk/messaging/azservicebus/internal/errors.go | 3 + .../azservicebus/internal/errors_test.go | 1 + 6 files changed, 81 insertions(+), 62 deletions(-) diff --git a/sdk/messaging/azeventhubs/CHANGELOG.md b/sdk/messaging/azeventhubs/CHANGELOG.md index 7d3c71c3266e..fede8f8742ce 100644 --- a/sdk/messaging/azeventhubs/CHANGELOG.md +++ b/sdk/messaging/azeventhubs/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 1.0.0 (2023-04-11) +## 1.0.0 (2023-05-09) ### Features Added @@ -13,13 +13,14 @@ - Recovery now includes internal timeouts and also handles restarting a connection if AMQP primitives aren't closed cleanly. - Potential leaks for $cbs and $management when there was a partial failure. (PR#20564) - Latest go-amqp changes have been merged in with fixes for robustness. +- Sending a message to an entity that is full will no longer retry. 
(PR#20722) ## 0.6.0 (2023-03-07) ### Features Added -- Added the `ConsumerClientOptions.InstanceID` field. This optional field can enhance error messages from - Event Hubs. For example, error messages related to ownership changes for a partition will contain the +- Added the `ConsumerClientOptions.InstanceID` field. This optional field can enhance error messages from + Event Hubs. For example, error messages related to ownership changes for a partition will contain the name of the link that has taken ownership, which can help with traceability. ### Breaking Changes @@ -41,7 +42,7 @@ ### Breaking Changes - ProcessorOptions.OwnerLevel has been removed. The Processor uses 0 as the owner level. -- Uses the public release of `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` package rather than using an internal copy. +- Uses the public release of `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` package rather than using an internal copy. For an example, see [example_consuming_with_checkpoints_test.go](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go). ## 0.4.0 (2023-01-10) @@ -49,7 +50,7 @@ ### Bugs Fixed - User-Agent was incorrectly formatted in our AMQP-based clients. (PR#19712) -- Connection recovery has been improved, removing some unnecessasry retries as well as adding a bound around +- Connection recovery has been improved, removing some unnecessasry retries as well as adding a bound around some operations (Close) that could potentially block recovery for a long time. (PR#19683) ## 0.3.0 (2022-11-10) @@ -78,7 +79,7 @@ - NewWebSocketConnArgs renamed to WebSocketConnParams - Code renamed to ErrorCode, including associated constants like `ErrorCodeOwnershipLost`. - OwnershipData, CheckpointData, and CheckpointStoreAddress have been folded into their individual structs: Ownership and Checkpoint. 
-- StartPosition and OwnerLevel were erroneously included in the ConsumerClientOptions struct - they've been removed. These can be +- StartPosition and OwnerLevel were erroneously included in the ConsumerClientOptions struct - they've been removed. These can be configured in the PartitionClientOptions. ### Bugs Fixed @@ -90,8 +91,8 @@ ### Features Added -- Adding in the new Processor type, which can be used to do distributed (and load balanced) consumption of events, using a - CheckpointStore. The built-in checkpoints.BlobStore uses Azure Blob Storage for persistence. A full example is +- Adding in the new Processor type, which can be used to do distributed (and load balanced) consumption of events, using a + CheckpointStore. The built-in checkpoints.BlobStore uses Azure Blob Storage for persistence. A full example is in [example_consuming_with_checkpoints_test.go](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go). ### Breaking Changes @@ -101,6 +102,7 @@ instances (using ConsumerClient.NewPartitionClient), which allows you to share the same AMQP connection and receive from multiple partitions simultaneously. - Changes to EventData/ReceivedEventData: + - ReceivedEventData now embeds EventData for fields common between the two, making it easier to change and resend. - `ApplicationProperties` renamed to `Properties`. - `PartitionKey` removed from `EventData`. To send events using a PartitionKey you must set it in the options @@ -108,8 +110,8 @@ ```go batch, err := producerClient.NewEventDataBatch(context.TODO(), &azeventhubs.NewEventDataBatchOptions{ - PartitionKey: to.Ptr("partition key"), - }) + PartitionKey: to.Ptr("partition key"), + }) ``` ### Bugs Fixed @@ -120,4 +122,4 @@ ## 0.1.0 (2022-08-11) -- Initial preview for the new version of the Azure Event Hubs Go SDK. +- Initial preview for the new version of the Azure Event Hubs Go SDK. 
diff --git a/sdk/messaging/azeventhubs/internal/errors.go b/sdk/messaging/azeventhubs/internal/errors.go index b0b08b0c57fc..2f9fb1a90b56 100644 --- a/sdk/messaging/azeventhubs/internal/errors.go +++ b/sdk/messaging/azeventhubs/internal/errors.go @@ -139,6 +139,9 @@ var amqpConditionsToRecoveryKind = map[amqp.ErrCond]RecoveryKind{ amqp.ErrCondInternalError: RecoveryKindConn, // "amqp:internal-error" // No recovery possible - this operation is non retriable. + + // ErrCondResourceLimitExceeded comes back if the entity is actually full. + amqp.ErrCondResourceLimitExceeded: RecoveryKindFatal, // "amqp:resource-limit-exceeded" amqp.ErrCondMessageSizeExceeded: RecoveryKindFatal, // "amqp:link:message-size-exceeded" amqp.ErrCondUnauthorizedAccess: RecoveryKindFatal, // creds are bad amqp.ErrCondNotFound: RecoveryKindFatal, // "amqp:not-found" diff --git a/sdk/messaging/azeventhubs/internal/errors_test.go b/sdk/messaging/azeventhubs/internal/errors_test.go index b7b35dbc95ed..e51ad2cab27f 100644 --- a/sdk/messaging/azeventhubs/internal/errors_test.go +++ b/sdk/messaging/azeventhubs/internal/errors_test.go @@ -44,6 +44,7 @@ func TestGetRecoveryKind(t *testing.T) { require.Equal(t, GetRecoveryKind(context.Canceled), RecoveryKindFatal) require.Equal(t, GetRecoveryKind(RPCError{Resp: &amqpwrap.RPCResponse{Code: http.StatusUnauthorized}}), RecoveryKindFatal) require.Equal(t, GetRecoveryKind(RPCError{Resp: &amqpwrap.RPCResponse{Code: http.StatusNotFound}}), RecoveryKindFatal) + require.Equal(t, GetRecoveryKind(&amqp.Error{Condition: amqp.ErrCondResourceLimitExceeded}), RecoveryKindFatal) } func Test_TransformError(t *testing.T) { diff --git a/sdk/messaging/azservicebus/CHANGELOG.md b/sdk/messaging/azservicebus/CHANGELOG.md index 85df8928245c..d7af17b754fe 100644 --- a/sdk/messaging/azservicebus/CHANGELOG.md +++ b/sdk/messaging/azservicebus/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 1.3.0 (2023-04-11) +## 1.3.0 (2023-05-09) ### Features Added @@ -11,6 +11,8 @@ - 
Authentication errors could cause unnecessary retries, making calls taking longer to fail. (PR#20447) - Recovery now includes internal timeouts and also handles restarting a connection if AMQP primitives aren't closed cleanly. - Potential leaks for $cbs and $management when there was a partial failure. (PR#20564) +- Sending a message to an entity that is full will no longer retry. (PR#20722) +- Sending a message that is larger than what the link supports now returns ErrMessageTooLarge. (PR#20721) - Latest go-amqp changes have been merged in with fixes for robustness. ## 1.2.1 (2023-03-07) @@ -38,7 +40,7 @@ ### Bugs Fixed -- Removing changes for client-side idle timer and closing without timeout. Combined these are +- Removing changes for client-side idle timer and closing without timeout. Combined these are causing issues with links not properly recovering or closing. Investigating an alternative for a future release. @@ -46,7 +48,7 @@ ### Features Added -- Added a client-side idle timer which will reset Receiver links, transparently, if the link is idle for +- Added a client-side idle timer which will reset Receiver links, transparently, if the link is idle for 5 minutes. ### Bugs Fixed @@ -57,8 +59,8 @@ ### Bugs Fixed -- AcceptNextSessionForQueue and AcceptNextSessionForSubscription now return an azservicebus.Error with - Code set to CodeTimeout when they fail due to no sessions being available. Examples for this have +- AcceptNextSessionForQueue and AcceptNextSessionForSubscription now return an azservicebus.Error with + Code set to CodeTimeout when they fail due to no sessions being available. Examples for this have been added for `AcceptNextSessionForQueue`. PR#19113. - Retries now respect cancellation when they're in the "delay before next try" phase. @@ -66,19 +68,19 @@ ### Features Added -- Full access to send and receive all AMQP message properties. (#18413) +- Full access to send and receive all AMQP message properties. 
(#18413) - Send AMQP messages using the new `AMQPAnnotatedMessage` type and `Sender.SendAMQPAnnotatedMessage()`. - AMQP messages can be added to MessageBatch's as well using `MessageBatch.AddAMQPAnnotatedMessage()`. - AMQP messages can be scheduled using `Sender.ScheduleAMQPAnnotatedMessages`. - - Access the full set of AMQP message properties when receiving using the `ReceivedMessage.RawAMQPMessage` property. + - Access the full set of AMQP message properties when receiving using the `ReceivedMessage.RawAMQPMessage` property. ### Bugs Fixed -- Changed receive messages algorithm to avoid messages being excessively locked in Service Bus without +- Changed receive messages algorithm to avoid messages being excessively locked in Service Bus without being transferred to the client. (PR#18657) - Updating go-amqp, which fixes several bugs related to incorrect message locking (PR#18599) - - Requesting large quantities of messages in a single ReceiveMessages() call could result in messages - not being delivered, but still incrementing their delivery count and requiring the message lock + - Requesting large quantities of messages in a single ReceiveMessages() call could result in messages + not being delivered, but still incrementing their delivery count and requiring the message lock timeout to expire. - Link detach could result in messages being ignored, requiring the message lock timeout to expire. - Subscription rules weren't deserializing properly when created from the portal (PR#18813) @@ -87,11 +89,11 @@ ### Features Added -- Full access to send and receive all AMQP message properties. (#18413) +- Full access to send and receive all AMQP message properties. (#18413) - Send AMQP messages using the new `AMQPAnnotatedMessage` type and `Sender.SendAMQPAnnotatedMessage()`. - AMQP messages can be added to MessageBatch's as well using `MessageBatch.AddAMQPAnnotatedMessage()`. - AMQP messages can be scheduled using `Sender.ScheduleAMQPAnnotatedMessages`. 
- - Access the full set of AMQP message properties when receiving using the `ReceivedMessage.RawAMQPMessage` property. + - Access the full set of AMQP message properties when receiving using the `ReceivedMessage.RawAMQPMessage` property. ### Bugs Fixed @@ -107,7 +109,7 @@ ### Bugs Fixed - Handle a missing CountDetails node in the returned responses for GetRuntimeProperties which could cause a panic. (#18213) -- Adding the `associated-link-name` property to management operations (RenewLock, settlement and others), which +- Adding the `associated-link-name` property to management operations (RenewLock, settlement and others), which can help extend link lifetime (#18291) - Namespace closing didn't reset the internal client, which could lead to connection recovery thrashing. (#18323) @@ -122,16 +124,16 @@ ### Features Added - Exported log.Event constants for azservicebus. This will make them easier to - discover and they are also documented. NOTE: The log messages themselves + discover and they are also documented. NOTE: The log messages themselves are not guaranteed to be stable. (#17596) -- `admin.Client` can now manage authorization rules and subscription filters and +- `admin.Client` can now manage authorization rules and subscription filters and actions. (#17616) - Exported an official `*azservicebus.Error` type that gets returned if the failure is actionable. This can indicate if the connection was lost and could not be recovered with the configured retries or if a message lock was lost, which would cause - message settlement to fail. + message settlement to fail. - See the `ExampleReceiver_ReceiveMessages` in example_receiver_test.go for an example + See the `ExampleReceiver_ReceiveMessages` in example_receiver_test.go for an example on how to use it. 
(#17786) ### Breaking Changes @@ -142,7 +144,7 @@ ### Bugs Fixed -- Fixing issue where the AcceptNextSessionForQueue and AcceptNextSessionForSubscription +- Fixing issue where the AcceptNextSessionForQueue and AcceptNextSessionForSubscription couldn't be cancelled, forcing the user to wait for the service to timeout. (#17598) - Fixing bug where there was a chance that internally cached messages would not be returned when the receiver was draining. (#17893) @@ -158,7 +160,7 @@ - Fixed bug where message batch size calculation was inaccurate, resulting in batches that were too large to be sent. (#17318) - Fixing an issue with an entity not being found leading to a longer timeout than needed. (#17279) - Fixed the RPCLink so it does better handling of connection/link failures. (#17389) -- Fixed issue where a message lock expiring would cause unnecessary retries. These retries could cause message settlement calls (ex: Receiver.CompleteMessage) +- Fixed issue where a message lock expiring would cause unnecessary retries. These retries could cause message settlement calls (ex: Receiver.CompleteMessage) to appear to hang. (#17382) - Fixed issue where a cancellation on ReceiveMessages() would work, but wouldn't return the proper cancellation error. (#17422) @@ -168,54 +170,60 @@ - Multiple functions have had `options` parameters added. - `SessionReceiver.RenewMessageLock` has been removed - it isn't used for sessions. SessionReceivers should use `SessionReceiver.RenewSessionLock`. - The `admin.Client` type has been changed to conform with the latest Azure Go SDK guidelines. As part of this: + - Embedded `*Result` structs in `admin.Client`'s APIs have been removed. Inner *Properties values have been hoisted up to the `*Response` instead. - - `.Response` fields have been removed for successful results. These will be added back using a different pattern in the next release. 
- - Fields that were of type `time.Duration` have been changed to `*string`, where the value of the string is an ISO8601 timestamp. - Affected fields from Queues, Topics and Subscriptions: AutoDeleteOnIdle, DefaultMessageTimeToLive, DuplicateDetectionHistoryTimeWindow, LockDuration. + - `.Response` fields have been removed for successful results. These will be added back using a different pattern in the next release. + - Fields that were of type `time.Duration` have been changed to `*string`, where the value of the string is an ISO8601 timestamp. + Affected fields from Queues, Topics and Subscriptions: AutoDeleteOnIdle, DefaultMessageTimeToLive, DuplicateDetectionHistoryTimeWindow, LockDuration. - Properties that were passed as a parameter to CreateQueue, CreateTopic or CreateSubscription are now in the `options` parameter (as they were optional): Previously: + ```go // older code - adminClient.CreateQueue(context.Background(), queueName, &queueProperties, nil) + adminClient.CreateQueue(context.Background(), queueName, &queueProperties, nil) ``` And now: + ```go // new code adminClient.CreateQueue(context.Background(), queueName, &admin.CreateQueueOptions{ Properties: queueProperties, }) - ``` + ``` + - Pagers have been changed to use the new generics-based `runtime.Pager`: - + Previously: + ```go // older code for queuePager.NextPage(context.TODO()) { - for _, queue := range queuePager.PageResponse().Items { - fmt.Printf("Queue name: %s, max size in MB: %d\n", queue.QueueName, *queue.MaxSizeInMegabytes) - } - } - + for _, queue := range queuePager.PageResponse().Items { + fmt.Printf("Queue name: %s, max size in MB: %d\n", queue.QueueName, *queue.MaxSizeInMegabytes) + } + } + if err := queuePager.Err(); err != nil { panic(err) } ``` + And now: ```go // new code for queuePager.More() { - page, err := queuePager.NextPage(context.TODO()) + page, err := queuePager.NextPage(context.TODO()) - if err != nil { - panic(err) - } + if err != nil { + panic(err) + } - for _, 
queue := range page.Queues { - fmt.Printf("Queue name: %s, max size in MB: %d\n", queue.QueueName, *queue.MaxSizeInMegabytes) - } - } + for _, queue := range page.Queues { + fmt.Printf("Queue name: %s, max size in MB: %d\n", queue.QueueName, *queue.MaxSizeInMegabytes) + } + } ``` ## 0.3.6 (2022-03-08) @@ -248,7 +256,7 @@ ### Bugs Fixed -- Fix unaligned 64-bit atomic operation on mips. Thanks to @jackesdavid for contributing this fix. (#16847) +- Fix unaligned 64-bit atomic operation on mips. Thanks to @jackesdavid for contributing this fix. (#16847) - Multiple fixes to address connection/link recovery (#16831) - Fixing panic() when the links haven't been initialized (early cancellation) (#16941) - Handle 500 as a retryable code (no recovery needed) (#16925) @@ -259,7 +267,7 @@ - Support the pass-through of an Application ID when constructing an Azure Service Bus Client. PR#16558 (thanks halspang!) -### Bugs Fixed +### Bugs Fixed - Fixing connection/link recovery in Sender.SendMessages() and Sender.SendMessageBatch(). PR#16790 - Fixing bug in the management link which could cause it to panic during recovery. PR#16790 @@ -268,13 +276,13 @@ ### Features Added -- Enabling websocket support via `ClientOptions.NewWebSocketConn`. For an example, see the `ExampleNewClient_usingWebsockets` +- Enabling websocket support via `ClientOptions.NewWebSocketConn`. For an example, see the `ExampleNewClient_usingWebsockets` function in `example_client_test.go`. ### Breaking Changes -- Message properties that come from the standard AMQP message have been made into pointers, to allow them to be - properly omitted (or indicate that they've been omitted) when sending and receiving. +- Message properties that come from the standard AMQP message have been made into pointers, to allow them to be + properly omitted (or indicate that they've been omitted) when sending and receiving. 
### Bugs Fixed @@ -283,13 +291,14 @@ - Attempting to settle messages received in ReceiveAndDelete mode would cause a panic. PR#16255 ### Other Changes + - Removed legacy dependencies, resulting in a much smaller package. ## 0.3.1 (2021-11-16) ### Bugs Fixed -- Updating go-amqp to v0.16.4 to fix a race condition found when running `go test -race`. Thanks to @peterzeller for reporting this issue. PR: #16168 +- Updating go-amqp to v0.16.4 to fix a race condition found when running `go test -race`. Thanks to @peterzeller for reporting this issue. PR: #16168 ## 0.3.0 (2021-11-12) @@ -303,27 +312,27 @@ - AdminClient has been moved into the `admin` subpackage. - ReceivedMessage.Body is now a function that returns a ([]byte, error), rather than being a field. - This protects against a potential data-loss scenario where a message is received with a payload + This protects against a potential data-loss scenario where a message is received with a payload encoded in the sequence or value sections of an AMQP message, which cannot be properly represented in the .Body. This will now return an error. -- Functions that have options or might have options in the future have an additional *options parameter. +- Functions that have options or might have options in the future have an additional \*options parameter. As usual, passing 'nil' ignores the options, and will cause the function to use defaults. -- MessageBatch.Add() has been renamed to MessageBatch.AddMessage(). AddMessage() now returns only an `error`, +- MessageBatch.Add() has been renamed to MessageBatch.AddMessage(). AddMessage() now returns only an `error`, with a sentinel error (ErrMessageTooLarge) signaling that the batch cannot fit a new message. - Sender.SendMessages() has been removed in favor of simplifications made in MessageBatch. ### Bugs Fixed -- ReceiveMessages has been tuned to match the .NET limits (which has worked well in practice). 
This partly addresses #15963, +- ReceiveMessages has been tuned to match the .NET limits (which has worked well in practice). This partly addresses #15963, as our default limit was far higher than needed. ## 0.2.0 (2021-11-02) ### Features Added -- Scheduling messages to be delivered at a later date, via the `Sender.ScheduleMessage(s)` function or +- Scheduling messages to be delivered at a later date, via the `Sender.ScheduleMessage(s)` function or setting `Message.ScheduledEnqueueTime`. -- Added in the `Sender.SendMessages([slice of sendable messages])` function, which batches messages +- Added in the `Sender.SendMessages([slice of sendable messages])` function, which batches messages automatically. Useful when you're sending multiple messages that you are already sure will be small enough to fit into a single batch. - Receiving from sessions using a SessionReceiver, created using Client.AcceptSessionFor(Queue|Subscription) @@ -338,4 +347,4 @@ ## 0.1.0 (2021-10-05) -- Initial preview for the new version of the Azure Service Bus Go SDK. +- Initial preview for the new version of the Azure Service Bus Go SDK. diff --git a/sdk/messaging/azservicebus/internal/errors.go b/sdk/messaging/azservicebus/internal/errors.go index 81d8ea07086b..b87d0621587a 100644 --- a/sdk/messaging/azservicebus/internal/errors.go +++ b/sdk/messaging/azservicebus/internal/errors.go @@ -160,6 +160,9 @@ var amqpConditionsToRecoveryKind = map[amqp.ErrCond]RecoveryKind{ amqp.ErrCondInternalError: RecoveryKindConn, // "amqp:internal-error" // No recovery possible - this operation is non retriable. + + // ErrCondResourceLimitExceeded comes back if the entity is actually full. 
+ amqp.ErrCondResourceLimitExceeded: RecoveryKindFatal, // "amqp:resource-limit-exceeded" amqp.ErrCondMessageSizeExceeded: RecoveryKindFatal, // "amqp:link:message-size-exceeded" amqp.ErrCondUnauthorizedAccess: RecoveryKindFatal, // creds are bad amqp.ErrCondNotFound: RecoveryKindFatal, // "amqp:not-found" diff --git a/sdk/messaging/azservicebus/internal/errors_test.go b/sdk/messaging/azservicebus/internal/errors_test.go index c3fcdf2ad0c7..82658a0b60e6 100644 --- a/sdk/messaging/azservicebus/internal/errors_test.go +++ b/sdk/messaging/azservicebus/internal/errors_test.go @@ -227,6 +227,7 @@ func Test_ServiceBusError_LinkRecoveryNeeded(t *testing.T) { func Test_ServiceBusError_Fatal(t *testing.T) { var fatalConditions = []amqp.ErrCond{ amqp.ErrCondMessageSizeExceeded, + amqp.ErrCondResourceLimitExceeded, amqp.ErrCondUnauthorizedAccess, amqp.ErrCondNotFound, amqp.ErrCondNotAllowed, From 838842d21fe8b97b2d0380509a5660a14b66c4da Mon Sep 17 00:00:00 2001 From: Richard Park <51494936+richardpark-msft@users.noreply.github.com> Date: Tue, 2 May 2023 18:56:23 -0700 Subject: [PATCH 08/50] [azservicebus/azeventhubs] Redirect stderr and stdout to tee (#20726) --- sdk/messaging/azeventhubs/internal/eh/stress/deploy.ps1 | 2 +- .../internal/eh/stress/templates/stress-test-job.yaml | 2 +- sdk/messaging/azservicebus/internal/stress/deploy.ps1 | 2 +- .../azservicebus/internal/stress/templates/stress-test-job.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/messaging/azeventhubs/internal/eh/stress/deploy.ps1 b/sdk/messaging/azeventhubs/internal/eh/stress/deploy.ps1 index 79cd01e79509..30bb9a04a7be 100644 --- a/sdk/messaging/azeventhubs/internal/eh/stress/deploy.ps1 +++ b/sdk/messaging/azeventhubs/internal/eh/stress/deploy.ps1 @@ -9,7 +9,7 @@ function deployUsingLocalAddons() { $helmEnv = "pg2" if (-not (Get-ChildItem $stressTestAddonsFolder)) { - Write-Host "Can't find the the new stress test adons folder at $stressTestAddonsFolder" + Write-Host "Can't 
find the the new stress test addons folder at $stressTestAddonsFolder" return } diff --git a/sdk/messaging/azeventhubs/internal/eh/stress/templates/stress-test-job.yaml b/sdk/messaging/azeventhubs/internal/eh/stress/templates/stress-test-job.yaml index cfaec8a06a48..4a6ff57cc6a0 100644 --- a/sdk/messaging/azeventhubs/internal/eh/stress/templates/stress-test-job.yaml +++ b/sdk/messaging/azeventhubs/internal/eh/stress/templates/stress-test-job.yaml @@ -22,7 +22,7 @@ spec: - > set -ex; mkdir -p "$DEBUG_SHARE"; - /app/stress "{{.Stress.testTarget}}" "-rounds" "{{.Stress.rounds}}" "-prefetch" "{{.Stress.prefetch}}" "{{.Stress.verbose}}" "-sleepAfter" "{{.Stress.sleepAfter}}" | tee -a "${DEBUG_SHARE}/{{ .Stress.Scenario }}-`date +%s`.log"; + /app/stress "{{.Stress.testTarget}}" "-rounds" "{{.Stress.rounds}}" "-prefetch" "{{.Stress.prefetch}}" "{{.Stress.verbose}}" "-sleepAfter" "{{.Stress.sleepAfter}}" 2>&1 | tee -a "${DEBUG_SHARE}/{{ .Stress.Scenario }}-`date +%s`.log"; # Pulls the image on pod start, always. We tend to push to the same image and tag over and over again # when iterating, so this is a must. 
imagePullPolicy: Always diff --git a/sdk/messaging/azservicebus/internal/stress/deploy.ps1 b/sdk/messaging/azservicebus/internal/stress/deploy.ps1 index e1026202066f..c6191940ade6 100644 --- a/sdk/messaging/azservicebus/internal/stress/deploy.ps1 +++ b/sdk/messaging/azservicebus/internal/stress/deploy.ps1 @@ -8,7 +8,7 @@ function deployUsingLocalAddons() { $helmEnv = "pg2" if (-not (Get-ChildItem $stressTestAddonsFolder)) { - Write-Host "Can't find the the new stress test adons folder at $stressTestAddonsFolder" + Write-Host "Can't find the the new stress test addons folder at $stressTestAddonsFolder" return } diff --git a/sdk/messaging/azservicebus/internal/stress/templates/stress-test-job.yaml b/sdk/messaging/azservicebus/internal/stress/templates/stress-test-job.yaml index b183e545ffd8..5fb46c12b168 100644 --- a/sdk/messaging/azservicebus/internal/stress/templates/stress-test-job.yaml +++ b/sdk/messaging/azservicebus/internal/stress/templates/stress-test-job.yaml @@ -17,7 +17,7 @@ spec: - > set -ex; mkdir -p "$DEBUG_SHARE"; - /app/stress tests "{{ .Stress.testTarget }}" | tee -a "${DEBUG_SHARE}/{{ .Stress.Scenario }}-`date +%s`.log"; + /app/stress tests "{{ .Stress.testTarget }}" 2>&1 | tee -a "${DEBUG_SHARE}/{{ .Stress.Scenario }}-`date +%s`.log"; # Pulls the image on pod start, always. We tend to push to the same image and tag over and over again # when iterating, so this is a must. imagePullPolicy: Always From 20b4dd8c3e30d468d33d6ad55e0a1dbaa4705abe Mon Sep 17 00:00:00 2001 From: Joel Hendrix Date: Wed, 3 May 2023 08:12:33 -0700 Subject: [PATCH 09/50] Update changelog with latest features (#20730) * Update changelog with latest features Prepare for upcoming release. 
* bump minor version --- sdk/azcore/CHANGELOG.md | 10 +++------- sdk/azcore/internal/shared/constants.go | 2 +- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/sdk/azcore/CHANGELOG.md b/sdk/azcore/CHANGELOG.md index eb50de41dee3..7ecc8f2a9679 100644 --- a/sdk/azcore/CHANGELOG.md +++ b/sdk/azcore/CHANGELOG.md @@ -1,14 +1,10 @@ # Release History -## 1.5.1 (Unreleased) +## 1.6.0 (2023-05-04) ### Features Added - -### Breaking Changes - -### Bugs Fixed - -### Other Changes +* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable. +* Added `TenantID` field to `policy.TokenRequestOptions`. ## 1.5.0 (2023-04-06) diff --git a/sdk/azcore/internal/shared/constants.go b/sdk/azcore/internal/shared/constants.go index 9d6bc39c0a60..681167bcba57 100644 --- a/sdk/azcore/internal/shared/constants.go +++ b/sdk/azcore/internal/shared/constants.go @@ -32,5 +32,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. 
- Version = "v1.5.1" + Version = "v1.6.0" ) From 745d967e27046474eac60690c8c44847f5e590f5 Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Wed, 3 May 2023 11:53:42 -0700 Subject: [PATCH 10/50] pass along the artifact name so we can override it later (#20732) Co-authored-by: scbedd <45376673+scbedd@users.noreply.github.com> --- eng/common/pipelines/templates/steps/detect-api-changes.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/eng/common/pipelines/templates/steps/detect-api-changes.yml b/eng/common/pipelines/templates/steps/detect-api-changes.yml index 833175ad7543..5210ebbe4b61 100644 --- a/eng/common/pipelines/templates/steps/detect-api-changes.yml +++ b/eng/common/pipelines/templates/steps/detect-api-changes.yml @@ -1,6 +1,7 @@ parameters: ArtifactPath: $(Build.ArtifactStagingDirectory) Artifacts: [] + ArtifactName: 'packages' steps: - pwsh: | @@ -20,6 +21,7 @@ steps: -PullRequestNumber $(System.PullRequest.PullRequestNumber) -RepoFullName $(Build.Repository.Name) -APIViewUri $(ApiChangeDetectRequestUrl) + -ArtifactName ${{ parameters.ArtifactName }} pwsh: true displayName: Detect API changes condition: and(succeededOrFailed(), eq(variables['Build.Reason'],'PullRequest')) From 6dfd0cbd7c2796dbb3836edfbb712badbc8fbc4b Mon Sep 17 00:00:00 2001 From: Richard Park <51494936+richardpark-msft@users.noreply.github.com> Date: Wed, 3 May 2023 15:26:42 -0700 Subject: [PATCH 11/50] [azeventhubs] Fixing checkpoint store race condition (#20727) The checkpoint store wasn't guarding against multiple owners claiming for the first time - fixing this by using IfNoneMatch Fixes #20717 --- sdk/messaging/azeventhubs/CHANGELOG.md | 1 + .../azeventhubs/checkpoints/blob_store.go | 80 ++++++++---- .../checkpoints/blob_store_test.go | 121 ++++++++++++++++++ .../azeventhubs/internal/test/test_helpers.go | 7 + 4 files changed, 183 insertions(+), 26 deletions(-) diff --git a/sdk/messaging/azeventhubs/CHANGELOG.md 
b/sdk/messaging/azeventhubs/CHANGELOG.md index fede8f8742ce..7cfbfb33ee54 100644 --- a/sdk/messaging/azeventhubs/CHANGELOG.md +++ b/sdk/messaging/azeventhubs/CHANGELOG.md @@ -14,6 +14,7 @@ - Potential leaks for $cbs and $management when there was a partial failure. (PR#20564) - Latest go-amqp changes have been merged in with fixes for robustness. - Sending a message to an entity that is full will no longer retry. (PR#20722) +- Checkpoint store handles multiple initial owners properly, allowing only one through. (PR#20727) ## 0.6.0 (2023-03-07) diff --git a/sdk/messaging/azeventhubs/checkpoints/blob_store.go b/sdk/messaging/azeventhubs/checkpoints/blob_store.go index 453265e77c60..e8a134643603 100644 --- a/sdk/messaging/azeventhubs/checkpoints/blob_store.go +++ b/sdk/messaging/azeventhubs/checkpoints/blob_store.go @@ -44,6 +44,9 @@ func NewBlobStore(containerClient *container.Client, options *BlobStoreOptions) // ClaimOwnership attempts to claim ownership of the partitions in partitionOwnership and returns // the actual partitions that were claimed. +// +// If we fail to claim ownership because of another update then it will be omitted from the +// returned slice of [Ownership]'s. It is not considered an error. func (b *BlobStore) ClaimOwnership(ctx context.Context, partitionOwnership []azeventhubs.Ownership, options *azeventhubs.ClaimOwnershipOptions) ([]azeventhubs.Ownership, error) { var ownerships []azeventhubs.Ownership @@ -54,13 +57,12 @@ func (b *BlobStore) ClaimOwnership(ctx context.Context, partitionOwnership []aze if err != nil { return nil, err } - lastModified, etag, err := b.setMetadata(ctx, blobName, newOwnershipBlobMetadata(po), po.ETag) + lastModified, etag, err := b.setOwnershipMetadata(ctx, blobName, po) if err != nil { - if bloberror.HasCode(err, bloberror.ConditionNotMet) { - // we can fail to claim ownership and that's okay - it's expected that clients will - // attempt to claim with whatever state they hold locally. 
If they fail it just means - // someone else claimed ownership before them. + if bloberror.HasCode(err, + bloberror.ConditionNotMet, // updated before we could update it + bloberror.BlobAlreadyExists) { // created before we could create it continue } @@ -179,6 +181,8 @@ func (b *BlobStore) ListOwnership(ctx context.Context, fullyQualifiedNamespace s } // UpdateCheckpoint updates a specific checkpoint with a sequence and offset. +// +// NOTE: This function doesn't attempt to prevent simultaneous checkpoint updates - ownership is assumed. func (b *BlobStore) UpdateCheckpoint(ctx context.Context, checkpoint azeventhubs.Checkpoint, options *azeventhubs.UpdateCheckpointOptions) error { blobName, err := nameForCheckpointBlob(checkpoint) @@ -186,18 +190,19 @@ func (b *BlobStore) UpdateCheckpoint(ctx context.Context, checkpoint azeventhubs return err } - _, _, err = b.setMetadata(ctx, blobName, newCheckpointBlobMetadata(checkpoint), nil) + _, _, err = b.setCheckpointMetadata(ctx, blobName, checkpoint) return err } -func (b *BlobStore) setMetadata(ctx context.Context, blobName string, blobMetadata map[string]*string, etag *azcore.ETag) (*time.Time, azcore.ETag, error) { +func (b *BlobStore) setOwnershipMetadata(ctx context.Context, blobName string, ownership azeventhubs.Ownership) (*time.Time, azcore.ETag, error) { + blobMetadata := newOwnershipBlobMetadata(ownership) blobClient := b.cc.NewBlockBlobClient(blobName) - if etag != nil { + if ownership.ETag != nil { setMetadataResp, err := blobClient.SetMetadata(ctx, blobMetadata, &blob.SetMetadataOptions{ AccessConditions: &blob.AccessConditions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ - IfMatch: etag, + IfMatch: ownership.ETag, }, }, }) @@ -207,29 +212,52 @@ func (b *BlobStore) setMetadata(ctx context.Context, blobName string, blobMetada } return setMetadataResp.LastModified, *setMetadataResp.ETag, nil - } else { - setMetadataResp, err := blobClient.SetMetadata(ctx, blobMetadata, nil) + } - if err == nil { - 
return setMetadataResp.LastModified, *setMetadataResp.ETag, nil - } + uploadResp, err := blobClient.Upload(ctx, streaming.NopCloser(bytes.NewReader([]byte{})), &blockblob.UploadOptions{ + Metadata: blobMetadata, + AccessConditions: &blob.AccessConditions{ + ModifiedAccessConditions: &blob.ModifiedAccessConditions{ + IfNoneMatch: to.Ptr(azcore.ETag("*")), + }, + }, + }) - if !bloberror.HasCode(err, bloberror.BlobNotFound) { - return nil, "", err - } + if err != nil { + return nil, "", err + } - // in JS they check to see if the error is BlobNotFound. If it is, then they - // do a full upload of a blob instead. - uploadResp, err := blobClient.Upload(ctx, streaming.NopCloser(bytes.NewReader([]byte{})), &blockblob.UploadOptions{ - Metadata: blobMetadata, - }) + return uploadResp.LastModified, *uploadResp.ETag, nil +} - if err != nil { - return nil, "", err - } +// setCheckpointMetadata sets the metadata for a checkpoint, falling back to creating +// the blob if it doesn't already exist. +// +// NOTE: unlike [setOwnershipMetadata] this function doesn't attempt to prevent simultaneous +// checkpoint updates - ownership is assumed. 
+func (b *BlobStore) setCheckpointMetadata(ctx context.Context, blobName string, checkpoint azeventhubs.Checkpoint) (*time.Time, azcore.ETag, error) { + blobMetadata := newCheckpointBlobMetadata(checkpoint) + blobClient := b.cc.NewBlockBlobClient(blobName) + + setMetadataResp, err := blobClient.SetMetadata(ctx, blobMetadata, nil) + + if err == nil { + return setMetadataResp.LastModified, *setMetadataResp.ETag, nil + } - return uploadResp.LastModified, *uploadResp.ETag, nil + if !bloberror.HasCode(err, bloberror.BlobNotFound) { + return nil, "", err } + + uploadResp, err := blobClient.Upload(ctx, streaming.NopCloser(bytes.NewReader([]byte{})), &blockblob.UploadOptions{ + Metadata: blobMetadata, + }) + + if err != nil { + return nil, "", err + } + + return uploadResp.LastModified, *uploadResp.ETag, nil } func nameForCheckpointBlob(a azeventhubs.Checkpoint) (string, error) { diff --git a/sdk/messaging/azeventhubs/checkpoints/blob_store_test.go b/sdk/messaging/azeventhubs/checkpoints/blob_store_test.go index 0537e60ac5e6..4605fdcb33ba 100644 --- a/sdk/messaging/azeventhubs/checkpoints/blob_store_test.go +++ b/sdk/messaging/azeventhubs/checkpoints/blob_store_test.go @@ -4,6 +4,7 @@ package checkpoints_test import ( "context" + "fmt" "os" "strconv" "testing" @@ -216,6 +217,126 @@ func TestBlobStore_ListAndClaim(t *testing.T) { require.Empty(t, claimedOwnerships) } +func TestBlobStore_OnlyOneOwnershipClaimSucceeds(t *testing.T) { + testData := getContainerClient(t) + defer testData.Cleanup() + + cc, err := container.NewClientFromConnectionString(testData.ConnectionString, testData.ContainerName, nil) + require.NoError(t, err) + + store, err := checkpoints.NewBlobStore(cc, nil) + require.NoError(t, err) + + // we're going to make multiple calls to the blob store but only _one_ should succeed + // since it's "first one in wins" + claimsCh := make(chan []azeventhubs.Ownership, 20) + + t.Logf("Starting %d goroutines to claim ownership without an etag", cap(claimsCh)) + + // 
attempt to claim the same partition from multiple goroutines. Only _one_ of the + // goroutines should walk away thinking it claimed the partition. + for i := 0; i < cap(claimsCh); i++ { + go func() { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + ownerships, err := store.ClaimOwnership(ctx, []azeventhubs.Ownership{ + {ConsumerGroup: azeventhubs.DefaultConsumerGroup, EventHubName: "name", FullyQualifiedNamespace: "ns", PartitionID: "0", OwnerID: "ownerID"}, + }, nil) + + if err != nil { + claimsCh <- nil + require.NoError(t, err) + } else { + claimsCh <- ownerships + } + }() + } + + claimed := map[string]bool{} + numFailedClaims := 0 + + for i := 0; i < cap(claimsCh); i++ { + claims := <-claimsCh + + if claims == nil { + numFailedClaims++ + continue + } + + for _, claim := range claims { + require.False(t, claimed[claim.PartitionID], fmt.Sprintf("Partition ID %s was claimed more than once", claim.PartitionID)) + require.NotNil(t, claim.ETag) + claimed[claim.PartitionID] = true + } + } + + require.Equal(t, cap(claimsCh)-1, numFailedClaims, fmt.Sprintf("One of the 1/%d wins and the rest all fail to claim", cap(claimsCh))) +} + +func TestBlobStore_OnlyOneOwnershipUpdateSucceeds(t *testing.T) { + testData := getContainerClient(t) + defer testData.Cleanup() + + cc, err := container.NewClientFromConnectionString(testData.ConnectionString, testData.ContainerName, nil) + require.NoError(t, err) + + store, err := checkpoints.NewBlobStore(cc, nil) + require.NoError(t, err) + + // we're going to make multiple calls to the blob store but only _one_ should succeed + // since it's "first one in wins" + claimsCh := make(chan []azeventhubs.Ownership, 20) + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + ownerships, err := store.ClaimOwnership(ctx, []azeventhubs.Ownership{ + {ConsumerGroup: azeventhubs.DefaultConsumerGroup, EventHubName: "name", FullyQualifiedNamespace: "ns", 
PartitionID: "0", OwnerID: "ownerID"}, + }, nil) + require.NoError(t, err) + require.Equal(t, "0", ownerships[0].PartitionID) + require.NotNil(t, ownerships[0].ETag) + + t.Logf("Starting %d goroutines to claim ownership without an etag", cap(claimsCh)) + + // attempt to claim the same partition from multiple goroutines. Only _one_ of the + // goroutines should walk away thinking it claimed the partition. + for i := 0; i < cap(claimsCh); i++ { + go func() { + + ownerships, err := store.ClaimOwnership(ctx, ownerships, nil) + + if err != nil { + claimsCh <- nil + require.NoError(t, err) + } else { + claimsCh <- ownerships + } + }() + } + + claimed := map[string]bool{} + numFailedClaims := 0 + + for i := 0; i < cap(claimsCh); i++ { + claims := <-claimsCh + + if claims == nil { + numFailedClaims++ + continue + } + + for _, claim := range claims { + require.False(t, claimed[claim.PartitionID], fmt.Sprintf("Partition ID %s was claimed more than once", claim.PartitionID)) + require.NotNil(t, claim.ETag) + claimed[claim.PartitionID] = true + } + } + + require.Equal(t, cap(claimsCh)-1, numFailedClaims, fmt.Sprintf("One of the 1/%d wins and the rest all fail to claim", cap(claimsCh))) +} + func getContainerClient(t *testing.T) struct { ConnectionString string ContainerName string diff --git a/sdk/messaging/azeventhubs/internal/test/test_helpers.go b/sdk/messaging/azeventhubs/internal/test/test_helpers.go index bfe1aa3c1bb0..97f7e5cfad83 100644 --- a/sdk/messaging/azeventhubs/internal/test/test_helpers.go +++ b/sdk/messaging/azeventhubs/internal/test/test_helpers.go @@ -40,6 +40,13 @@ func CaptureLogsForTest() func() []string { func CaptureLogsForTestWithChannel(messagesCh chan string) func() []string { setAzLogListener(func(e azlog.Event, s string) { + defer func() { + if err := recover(); err != nil { + fmt.Printf("FAILED SENDING MESSAGE (%s), message was: [%s] %s\n", err, e, s) + panic(err) + } + }() + messagesCh <- fmt.Sprintf("[%s] %s", e, s) }) From 
ed7f3c719ea35a75734ac690e8fd02b17adbe4d7 Mon Sep 17 00:00:00 2001 From: Charles Lowell <10964656+chlowell@users.noreply.github.com> Date: Wed, 3 May 2023 16:33:01 -0700 Subject: [PATCH 12/50] Fix azidentity troubleshooting guide link (#20736) --- sdk/azidentity/TROUBLESHOOTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/azidentity/TROUBLESHOOTING.md b/sdk/azidentity/TROUBLESHOOTING.md index a6c1c5ca30ab..7b7515ebac22 100644 --- a/sdk/azidentity/TROUBLESHOOTING.md +++ b/sdk/azidentity/TROUBLESHOOTING.md @@ -80,7 +80,7 @@ azlog.SetEvents(azidentity.EventAuthentication) | Error |Description| Mitigation | |---|---|---| -|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
  • [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
  • Consult the troubleshooting guide for underlying credential types for more information.
    • [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
    • [ManagedIdentityCredential](#troubleshoot-visualstudiocredential-authentication-issues)
    • [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
    | +|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
    • [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
    • Consult the troubleshooting guide for underlying credential types for more information.
      • [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
      • [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)
      • [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
      | |Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|
      • [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.
      • If an unexpected credential is returning a token, check application configuration such as environment variables.
      • Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role.
      | ## Troubleshoot EnvironmentCredential authentication issues From b2cddab175e50e5e392f16d0b7b2745fafe51fe4 Mon Sep 17 00:00:00 2001 From: Peng Jiahui <46921893+Alancere@users.noreply.github.com> Date: Thu, 4 May 2023 10:26:03 +0800 Subject: [PATCH 13/50] [Release] sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/0.1.0 (#20437) * [Release] sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/0.1.0 generation from spec commit: 85fb4ac6f8bfefd179e6c2632976a154b5c9ff04 * client factory * fix * fix * update --- .../armpanngfw/CHANGELOG.md | 7 + .../armpanngfw/LICENSE.txt | 21 + .../paloaltonetworksngfw/armpanngfw/README.md | 85 + .../armpanngfw/autorest.md | 13 + .../paloaltonetworksngfw/armpanngfw/build.go | 7 + ...certificateobjectglobalrulestack_client.go | 284 ++ ...jectglobalrulestack_client_example_test.go | 299 ++ .../certificateobjectlocalrulestack_client.go | 323 ++ ...bjectlocalrulestack_client_example_test.go | 299 ++ .../paloaltonetworksngfw/armpanngfw/ci.yml | 28 + .../armpanngfw/client_factory.go | 109 + .../armpanngfw/constants.go | 488 +++ .../armpanngfw/firewalls_client.go | 647 +++ .../firewalls_client_example_test.go | 1734 ++++++++ .../armpanngfw/firewallstatus_client.go | 172 + .../firewallstatus_client_example_test.go | 170 + .../fqdnlistglobalrulestack_client.go | 284 ++ ...listglobalrulestack_client_example_test.go | 311 ++ .../fqdnlistlocalrulestack_client.go | 323 ++ ...nlistlocalrulestack_client_example_test.go | 311 ++ .../armpanngfw/globalrulestack_client.go | 791 ++++ .../globalrulestack_client_example_test.go | 987 +++++ .../paloaltonetworksngfw/armpanngfw/go.mod | 21 + .../paloaltonetworksngfw/armpanngfw/go.sum | 31 + .../armpanngfw/localrules_client.go | 509 +++ .../localrules_client_example_test.go | 629 +++ .../armpanngfw/localrulestacks_client.go | 1041 +++++ .../localrulestacks_client_example_test.go | 1171 ++++++ .../paloaltonetworksngfw/armpanngfw/models.go | 1924 +++++++++ .../armpanngfw/models_serde.go | 3530 
+++++++++++++++++ .../armpanngfw/operations_client.go | 94 + .../operations_client_example_test.go | 85 + .../armpanngfw/postrules_client.go | 441 ++ .../postrules_client_example_test.go | 629 +++ .../prefixlistglobalrulestack_client.go | 284 ++ ...listglobalrulestack_client_example_test.go | 303 ++ .../prefixlistlocalrulestack_client.go | 323 ++ ...xlistlocalrulestack_client_example_test.go | 303 ++ .../armpanngfw/prerules_client.go | 441 ++ .../prerules_client_example_test.go | 629 +++ .../armpanngfw/response_types.go | 450 +++ .../armpanngfw/time_rfc3339.go | 87 + 42 files changed, 20618 insertions(+) create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/CHANGELOG.md create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/LICENSE.txt create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/README.md create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/autorest.md create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/build.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectglobalrulestack_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectglobalrulestack_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectlocalrulestack_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectlocalrulestack_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/ci.yml create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/client_factory.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/constants.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewalls_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewalls_client_example_test.go create mode 100644 
sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewallstatus_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewallstatus_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistglobalrulestack_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistglobalrulestack_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistlocalrulestack_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistlocalrulestack_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/globalrulestack_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/globalrulestack_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/go.mod create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/go.sum create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrules_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrules_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrulestacks_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrulestacks_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/models.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/models_serde.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/operations_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/operations_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/postrules_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/postrules_client_example_test.go create mode 100644 
sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistglobalrulestack_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistglobalrulestack_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistlocalrulestack_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistlocalrulestack_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prerules_client.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prerules_client_example_test.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/response_types.go create mode 100644 sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/time_rfc3339.go diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/CHANGELOG.md b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/CHANGELOG.md new file mode 100644 index 000000000000..8033ec9495db --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/CHANGELOG.md @@ -0,0 +1,7 @@ +# Release History + +## 0.1.0 (2023-04-28) + +The package of `github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw` is using our [next generation design principles](https://azure.github.io/azure-sdk/general_introduction.html). + +To learn more, please refer to our documentation [Quick Start](https://aka.ms/azsdk/go/mgmt). \ No newline at end of file diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/LICENSE.txt b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/LICENSE.txt new file mode 100644 index 000000000000..dc0c2ffb3dc1 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/README.md b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/README.md new file mode 100644 index 000000000000..da161be94d23 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/README.md @@ -0,0 +1,85 @@ +# Azure PaloAltoNetworks Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw) + +The `armpanngfw` module provides operations for working with Azure PaloAltoNetworks. 
+ +[Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw) + +# Getting started + +## Prerequisites + +- an [Azure subscription](https://azure.microsoft.com/free/) +- Go 1.18 or above (You could download and install the latest version of Go from [here](https://go.dev/doc/install). It will replace the existing Go on your machine. If you want to install multiple Go versions on the same machine, you could refer this [doc](https://go.dev/doc/manage-install).) + +## Install the package + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Install the Azure PaloAltoNetworks module: + +```sh +go get github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw +``` + +## Authorization + +When creating a client, you will need to provide a credential for authenticating with Azure PaloAltoNetworks. The `azidentity` module provides facilities for various ways of authenticating with Azure including client/secret, certificate, managed identity, and more. + +```go +cred, err := azidentity.NewDefaultAzureCredential(nil) +``` + +For more information on authentication, please see the documentation for `azidentity` at [pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity). + +## Client Factory + +Azure PaloAltoNetworks module consists of one or more clients. We provide a client factory which could be used to create any client in this module. + +```go +clientFactory, err := armpanngfw.NewClientFactory(, cred, nil) +``` + +You can use `ClientOptions` in package `github.com/Azure/azure-sdk-for-go/sdk/azcore/arm` to set endpoint to connect with public and sovereign clouds as well as Azure Stack. 
For more information, please see the documentation for `azcore` at [pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). + +```go +options := arm.ClientOptions { + ClientOptions: azcore.ClientOptions { + Cloud: cloud.AzureChina, + }, +} +clientFactory, err := armpanngfw.NewClientFactory(, cred, &options) +``` + +## Clients + +A client groups a set of related APIs, providing access to its functionality. Create one or more clients to access the APIs you require using client factory. + +```go +client := clientFactory.NewLocalRulesClient() +``` + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues) and assign the `PaloAltoNetworks` label. + +# Contributing + +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. 
\ No newline at end of file diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/autorest.md b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/autorest.md new file mode 100644 index 000000000000..d3112cc93b6d --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/autorest.md @@ -0,0 +1,13 @@ +### AutoRest Configuration + +> see https://aka.ms/autorest + +``` yaml +azure-arm: true +require: +- https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/readme.md +- https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/readme.go.md +license-header: MICROSOFT_MIT_NO_VERSION +module-version: 0.1.0 + +``` \ No newline at end of file diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/build.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/build.go new file mode 100644 index 000000000000..ce99c5410442 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/build.go @@ -0,0 +1,7 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +// This file enables 'go generate' to regenerate this specific SDK +//go:generate pwsh ../../../../eng/scripts/build.ps1 -skipBuild -cleanGenerated -format -tidy -generate -alwaysSetBodyParamRequired -removeUnreferencedTypes resourcemanager/paloaltonetworksngfw/armpanngfw + +package armpanngfw diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectglobalrulestack_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectglobalrulestack_client.go new file mode 100644 index 000000000000..b40ac8daa3d4 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectglobalrulestack_client.go @@ -0,0 +1,284 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CertificateObjectGlobalRulestackClient contains the methods for the CertificateObjectGlobalRulestack group. +// Don't use this type directly, use NewCertificateObjectGlobalRulestackClient() instead. +type CertificateObjectGlobalRulestackClient struct { + internal *arm.Client +} + +// NewCertificateObjectGlobalRulestackClient creates a new instance of CertificateObjectGlobalRulestackClient with the specified values. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. 
+func NewCertificateObjectGlobalRulestackClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*CertificateObjectGlobalRulestackClient, error) { + cl, err := arm.NewClient(moduleName+".CertificateObjectGlobalRulestackClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &CertificateObjectGlobalRulestackClient{ + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a CertificateObjectGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - name - certificate name +// - resource - Resource create parameters. +// - options - CertificateObjectGlobalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the CertificateObjectGlobalRulestackClient.BeginCreateOrUpdate +// method. +func (client *CertificateObjectGlobalRulestackClient) BeginCreateOrUpdate(ctx context.Context, globalRulestackName string, name string, resource CertificateObjectGlobalRulestackResource, options *CertificateObjectGlobalRulestackClientBeginCreateOrUpdateOptions) (*runtime.Poller[CertificateObjectGlobalRulestackClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, globalRulestackName, name, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[CertificateObjectGlobalRulestackClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[CertificateObjectGlobalRulestackClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a CertificateObjectGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +func (client *CertificateObjectGlobalRulestackClient) createOrUpdate(ctx context.Context, globalRulestackName string, name string, resource CertificateObjectGlobalRulestackResource, options *CertificateObjectGlobalRulestackClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, globalRulestackName, name, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *CertificateObjectGlobalRulestackClient) createOrUpdateCreateRequest(ctx context.Context, globalRulestackName string, name string, resource CertificateObjectGlobalRulestackResource, options *CertificateObjectGlobalRulestackClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/certificates/{name}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a 
CertificateObjectGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - name - certificate name +// - options - CertificateObjectGlobalRulestackClientBeginDeleteOptions contains the optional parameters for the CertificateObjectGlobalRulestackClient.BeginDelete +// method. +func (client *CertificateObjectGlobalRulestackClient) BeginDelete(ctx context.Context, globalRulestackName string, name string, options *CertificateObjectGlobalRulestackClientBeginDeleteOptions) (*runtime.Poller[CertificateObjectGlobalRulestackClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, globalRulestackName, name, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[CertificateObjectGlobalRulestackClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[CertificateObjectGlobalRulestackClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a CertificateObjectGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +func (client *CertificateObjectGlobalRulestackClient) deleteOperation(ctx context.Context, globalRulestackName string, name string, options *CertificateObjectGlobalRulestackClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, globalRulestackName, name, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *CertificateObjectGlobalRulestackClient) deleteCreateRequest(ctx context.Context, globalRulestackName string, name string, options *CertificateObjectGlobalRulestackClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/certificates/{name}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a CertificateObjectGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - name - certificate name +// - options - CertificateObjectGlobalRulestackClientGetOptions contains the optional parameters for the CertificateObjectGlobalRulestackClient.Get +// method. +func (client *CertificateObjectGlobalRulestackClient) Get(ctx context.Context, globalRulestackName string, name string, options *CertificateObjectGlobalRulestackClientGetOptions) (CertificateObjectGlobalRulestackClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, globalRulestackName, name, options) + if err != nil { + return CertificateObjectGlobalRulestackClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return CertificateObjectGlobalRulestackClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CertificateObjectGlobalRulestackClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *CertificateObjectGlobalRulestackClient) getCreateRequest(ctx context.Context, globalRulestackName string, name string, options *CertificateObjectGlobalRulestackClientGetOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/certificates/{name}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *CertificateObjectGlobalRulestackClient) getHandleResponse(resp *http.Response) (CertificateObjectGlobalRulestackClientGetResponse, error) { + result := CertificateObjectGlobalRulestackClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CertificateObjectGlobalRulestackResource); err != nil { + return CertificateObjectGlobalRulestackClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List CertificateObjectGlobalRulestackResource resources by Tenant +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - CertificateObjectGlobalRulestackClientListOptions contains the optional parameters for the CertificateObjectGlobalRulestackClient.NewListPager +// method. 
+func (client *CertificateObjectGlobalRulestackClient) NewListPager(globalRulestackName string, options *CertificateObjectGlobalRulestackClientListOptions) *runtime.Pager[CertificateObjectGlobalRulestackClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[CertificateObjectGlobalRulestackClientListResponse]{ + More: func(page CertificateObjectGlobalRulestackClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CertificateObjectGlobalRulestackClientListResponse) (CertificateObjectGlobalRulestackClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, globalRulestackName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return CertificateObjectGlobalRulestackClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return CertificateObjectGlobalRulestackClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CertificateObjectGlobalRulestackClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
+func (client *CertificateObjectGlobalRulestackClient) listCreateRequest(ctx context.Context, globalRulestackName string, options *CertificateObjectGlobalRulestackClientListOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/certificates" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *CertificateObjectGlobalRulestackClient) listHandleResponse(resp *http.Response) (CertificateObjectGlobalRulestackClientListResponse, error) { + result := CertificateObjectGlobalRulestackClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CertificateObjectGlobalRulestackResourceListResult); err != nil { + return CertificateObjectGlobalRulestackClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectglobalrulestack_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectglobalrulestack_client_example_test.go new file mode 100644 index 000000000000..7406b642e34e --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectglobalrulestack_client_example_test.go @@ -0,0 +1,299 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_List_MaximumSet_Gen.json +func ExampleCertificateObjectGlobalRulestackClient_NewListPager_certificateObjectGlobalRulestackListMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewCertificateObjectGlobalRulestackClient().NewListPager("praval", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.CertificateObjectGlobalRulestackResourceListResult = armpanngfw.CertificateObjectGlobalRulestackResourceListResult{ + // Value: []*armpanngfw.CertificateObjectGlobalRulestackResource{ + // { + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.CertificateObject{ + // Description: to.Ptr("desc"), + // AuditComment: to.Ptr("comment"), + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // CertificateSignerResourceID: to.Ptr(""), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_List_MinimumSet_Gen.json +func ExampleCertificateObjectGlobalRulestackClient_NewListPager_certificateObjectGlobalRulestackListMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := 
clientFactory.NewCertificateObjectGlobalRulestackClient().NewListPager("praval", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // page.CertificateObjectGlobalRulestackResourceListResult = armpanngfw.CertificateObjectGlobalRulestackResourceListResult{ + // Value: []*armpanngfw.CertificateObjectGlobalRulestackResource{ + // { + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval/certificates/certificates1"), + // Properties: &armpanngfw.CertificateObject{ + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_Get_MaximumSet_Gen.json +func ExampleCertificateObjectGlobalRulestackClient_Get_certificateObjectGlobalRulestackGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewCertificateObjectGlobalRulestackClient().Get(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. 
+ _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.CertificateObjectGlobalRulestackResource = armpanngfw.CertificateObjectGlobalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.CertificateObject{ + // Description: to.Ptr("description"), + // AuditComment: to.Ptr("comment"), + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // CertificateSignerResourceID: to.Ptr(""), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_Get_MinimumSet_Gen.json +func ExampleCertificateObjectGlobalRulestackClient_Get_certificateObjectGlobalRulestackGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := 
armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewCertificateObjectGlobalRulestackClient().Get(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.CertificateObjectGlobalRulestackResource = armpanngfw.CertificateObjectGlobalRulestackResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval/certificates/armid1"), + // Properties: &armpanngfw.CertificateObject{ + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_CreateOrUpdate_MaximumSet_Gen.json +func ExampleCertificateObjectGlobalRulestackClient_BeginCreateOrUpdate_certificateObjectGlobalRulestackCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewCertificateObjectGlobalRulestackClient().BeginCreateOrUpdate(ctx, "praval", "armid1", armpanngfw.CertificateObjectGlobalRulestackResource{ + Properties: &armpanngfw.CertificateObject{ + Description: to.Ptr("description"), + AuditComment: to.Ptr("comment"), + 
CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + CertificateSignerResourceID: to.Ptr(""), + Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.CertificateObjectGlobalRulestackResource = armpanngfw.CertificateObjectGlobalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.CertificateObject{ + // Description: to.Ptr("description"), + // AuditComment: to.Ptr("comment"), + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // CertificateSignerResourceID: to.Ptr(""), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_CreateOrUpdate_MinimumSet_Gen.json +func ExampleCertificateObjectGlobalRulestackClient_BeginCreateOrUpdate_certificateObjectGlobalRulestackCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewCertificateObjectGlobalRulestackClient().BeginCreateOrUpdate(ctx, "praval", "armid1", armpanngfw.CertificateObjectGlobalRulestackResource{ + Properties: &armpanngfw.CertificateObject{ + CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.CertificateObjectGlobalRulestackResource = armpanngfw.CertificateObjectGlobalRulestackResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval/certificates/armid1"), + // Properties: &armpanngfw.CertificateObject{ + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_Delete_MaximumSet_Gen.json +func ExampleCertificateObjectGlobalRulestackClient_BeginDelete_certificateObjectGlobalRulestackDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewCertificateObjectGlobalRulestackClient().BeginDelete(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_Delete_MinimumSet_Gen.json +func ExampleCertificateObjectGlobalRulestackClient_BeginDelete_certificateObjectGlobalRulestackDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err 
:= armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewCertificateObjectGlobalRulestackClient().BeginDelete(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectlocalrulestack_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectlocalrulestack_client.go new file mode 100644 index 000000000000..34b9a4b35dbf --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectlocalrulestack_client.go @@ -0,0 +1,323 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// CertificateObjectLocalRulestackClient contains the methods for the CertificateObjectLocalRulestack group. +// Don't use this type directly, use NewCertificateObjectLocalRulestackClient() instead. +type CertificateObjectLocalRulestackClient struct { + internal *arm.Client + subscriptionID string +} + +// NewCertificateObjectLocalRulestackClient creates a new instance of CertificateObjectLocalRulestackClient with the specified values. 
+// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewCertificateObjectLocalRulestackClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*CertificateObjectLocalRulestackClient, error) { + cl, err := arm.NewClient(moduleName+".CertificateObjectLocalRulestackClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &CertificateObjectLocalRulestackClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a CertificateObjectLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - name - certificate name +// - resource - Resource create parameters. +// - options - CertificateObjectLocalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the CertificateObjectLocalRulestackClient.BeginCreateOrUpdate +// method. 
+func (client *CertificateObjectLocalRulestackClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, name string, resource CertificateObjectLocalRulestackResource, options *CertificateObjectLocalRulestackClientBeginCreateOrUpdateOptions) (*runtime.Poller[CertificateObjectLocalRulestackClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, localRulestackName, name, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[CertificateObjectLocalRulestackClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[CertificateObjectLocalRulestackClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a CertificateObjectLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *CertificateObjectLocalRulestackClient) createOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, name string, resource CertificateObjectLocalRulestackResource, options *CertificateObjectLocalRulestackClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, localRulestackName, name, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *CertificateObjectLocalRulestackClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, name string, resource CertificateObjectLocalRulestackResource, options *CertificateObjectLocalRulestackClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/certificates/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a CertificateObjectLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - localRulestackName - LocalRulestack resource name +// - name - certificate name +// - options - CertificateObjectLocalRulestackClientBeginDeleteOptions contains the optional parameters for the CertificateObjectLocalRulestackClient.BeginDelete +// method. +func (client *CertificateObjectLocalRulestackClient) BeginDelete(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *CertificateObjectLocalRulestackClientBeginDeleteOptions) (*runtime.Poller[CertificateObjectLocalRulestackClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, localRulestackName, name, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[CertificateObjectLocalRulestackClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[CertificateObjectLocalRulestackClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a CertificateObjectLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *CertificateObjectLocalRulestackClient) deleteOperation(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *CertificateObjectLocalRulestackClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, localRulestackName, name, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *CertificateObjectLocalRulestackClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *CertificateObjectLocalRulestackClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/certificates/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a CertificateObjectLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - localRulestackName - LocalRulestack resource name +// - name - certificate name +// - options - CertificateObjectLocalRulestackClientGetOptions contains the optional parameters for the CertificateObjectLocalRulestackClient.Get +// method. +func (client *CertificateObjectLocalRulestackClient) Get(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *CertificateObjectLocalRulestackClientGetOptions) (CertificateObjectLocalRulestackClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, localRulestackName, name, options) + if err != nil { + return CertificateObjectLocalRulestackClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return CertificateObjectLocalRulestackClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CertificateObjectLocalRulestackClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *CertificateObjectLocalRulestackClient) getCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *CertificateObjectLocalRulestackClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/certificates/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
+func (client *CertificateObjectLocalRulestackClient) getHandleResponse(resp *http.Response) (CertificateObjectLocalRulestackClientGetResponse, error) { + result := CertificateObjectLocalRulestackClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CertificateObjectLocalRulestackResource); err != nil { + return CertificateObjectLocalRulestackClientGetResponse{}, err + } + return result, nil +} + +// NewListByLocalRulestacksPager - List CertificateObjectLocalRulestackResource resources by LocalRulestacks +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - CertificateObjectLocalRulestackClientListByLocalRulestacksOptions contains the optional parameters for the CertificateObjectLocalRulestackClient.NewListByLocalRulestacksPager +// method. +func (client *CertificateObjectLocalRulestackClient) NewListByLocalRulestacksPager(resourceGroupName string, localRulestackName string, options *CertificateObjectLocalRulestackClientListByLocalRulestacksOptions) *runtime.Pager[CertificateObjectLocalRulestackClientListByLocalRulestacksResponse] { + return runtime.NewPager(runtime.PagingHandler[CertificateObjectLocalRulestackClientListByLocalRulestacksResponse]{ + More: func(page CertificateObjectLocalRulestackClientListByLocalRulestacksResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *CertificateObjectLocalRulestackClientListByLocalRulestacksResponse) (CertificateObjectLocalRulestackClientListByLocalRulestacksResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByLocalRulestacksCreateRequest(ctx, resourceGroupName, localRulestackName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return 
CertificateObjectLocalRulestackClientListByLocalRulestacksResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return CertificateObjectLocalRulestackClientListByLocalRulestacksResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return CertificateObjectLocalRulestackClientListByLocalRulestacksResponse{}, runtime.NewResponseError(resp) + } + return client.listByLocalRulestacksHandleResponse(resp) + }, + }) +} + +// listByLocalRulestacksCreateRequest creates the ListByLocalRulestacks request. +func (client *CertificateObjectLocalRulestackClient) listByLocalRulestacksCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *CertificateObjectLocalRulestackClientListByLocalRulestacksOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/certificates" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// 
listByLocalRulestacksHandleResponse handles the ListByLocalRulestacks response. +func (client *CertificateObjectLocalRulestackClient) listByLocalRulestacksHandleResponse(resp *http.Response) (CertificateObjectLocalRulestackClientListByLocalRulestacksResponse, error) { + result := CertificateObjectLocalRulestackClientListByLocalRulestacksResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CertificateObjectLocalRulestackResourceListResult); err != nil { + return CertificateObjectLocalRulestackClientListByLocalRulestacksResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectlocalrulestack_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectlocalrulestack_client_example_test.go new file mode 100644 index 000000000000..b29605d1fed7 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/certificateobjectlocalrulestack_client_example_test.go @@ -0,0 +1,299 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectLocalRulestack_ListByLocalRulestacks_MaximumSet_Gen.json +func ExampleCertificateObjectLocalRulestackClient_NewListByLocalRulestacksPager_certificateObjectLocalRulestackListByLocalRulestacksMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewCertificateObjectLocalRulestackClient().NewListByLocalRulestacksPager("rgopenapi", "lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.CertificateObjectLocalRulestackResourceListResult = armpanngfw.CertificateObjectLocalRulestackResourceListResult{ + // Value: []*armpanngfw.CertificateObjectLocalRulestackResource{ + // { + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.CertificateObject{ + // Description: to.Ptr("desc"), + // AuditComment: to.Ptr("comment"), + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // CertificateSignerResourceID: to.Ptr(""), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectLocalRulestack_ListByLocalRulestacks_MinimumSet_Gen.json +func ExampleCertificateObjectLocalRulestackClient_NewListByLocalRulestacksPager_certificateObjectLocalRulestackListByLocalRulestacksMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to 
create client: %v", err) + } + pager := clientFactory.NewCertificateObjectLocalRulestackClient().NewListByLocalRulestacksPager("rgopenapi", "lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // page.CertificateObjectLocalRulestackResourceListResult = armpanngfw.CertificateObjectLocalRulestackResourceListResult{ + // Value: []*armpanngfw.CertificateObjectLocalRulestackResource{ + // { + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval/certificates/certificates1"), + // Properties: &armpanngfw.CertificateObject{ + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectLocalRulestack_Get_MaximumSet_Gen.json +func ExampleCertificateObjectLocalRulestackClient_Get_certificateObjectLocalRulestackGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewCertificateObjectLocalRulestackClient().Get(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + 
log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.CertificateObjectLocalRulestackResource = armpanngfw.CertificateObjectLocalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.CertificateObject{ + // Description: to.Ptr("description"), + // AuditComment: to.Ptr("comment"), + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // CertificateSignerResourceID: to.Ptr(""), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectLocalRulestack_Get_MinimumSet_Gen.json +func ExampleCertificateObjectLocalRulestackClient_Get_certificateObjectLocalRulestackGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + 
log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewCertificateObjectLocalRulestackClient().Get(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.CertificateObjectLocalRulestackResource = armpanngfw.CertificateObjectLocalRulestackResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval/certificates/armid1"), + // Properties: &armpanngfw.CertificateObject{ + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectLocalRulestack_CreateOrUpdate_MaximumSet_Gen.json +func ExampleCertificateObjectLocalRulestackClient_BeginCreateOrUpdate_certificateObjectLocalRulestackCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewCertificateObjectLocalRulestackClient().BeginCreateOrUpdate(ctx, "rgopenapi", 
"lrs1", "armid1", armpanngfw.CertificateObjectLocalRulestackResource{ + Properties: &armpanngfw.CertificateObject{ + Description: to.Ptr("description"), + AuditComment: to.Ptr("comment"), + CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + CertificateSignerResourceID: to.Ptr(""), + Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.CertificateObjectLocalRulestackResource = armpanngfw.CertificateObjectLocalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.CertificateObject{ + // Description: to.Ptr("description"), + // AuditComment: to.Ptr("comment"), + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // CertificateSignerResourceID: to.Ptr(""), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // 
ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectLocalRulestack_CreateOrUpdate_MinimumSet_Gen.json +func ExampleCertificateObjectLocalRulestackClient_BeginCreateOrUpdate_certificateObjectLocalRulestackCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewCertificateObjectLocalRulestackClient().BeginCreateOrUpdate(ctx, "rgopenapi", "lrs1", "armid1", armpanngfw.CertificateObjectLocalRulestackResource{ + Properties: &armpanngfw.CertificateObject{ + CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.CertificateObjectLocalRulestackResource = armpanngfw.CertificateObjectLocalRulestackResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval/certificates/armid1"), + // Properties: &armpanngfw.CertificateObject{ + // CertificateSelfSigned: to.Ptr(armpanngfw.BooleanEnumTRUE), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectLocalRulestack_Delete_MaximumSet_Gen.json +func ExampleCertificateObjectLocalRulestackClient_BeginDelete_certificateObjectLocalRulestackDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewCertificateObjectLocalRulestackClient().BeginDelete(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectLocalRulestack_Delete_MinimumSet_Gen.json +func ExampleCertificateObjectLocalRulestackClient_BeginDelete_certificateObjectLocalRulestackDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a 
credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewCertificateObjectLocalRulestackClient().BeginDelete(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/ci.yml b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/ci.yml new file mode 100644 index 000000000000..b997139ea5a5 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/ci.yml @@ -0,0 +1,28 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + IncludeRelease: true + ServiceDirectory: 'resourcemanager/paloaltonetworksngfw/armpanngfw' diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/client_factory.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/client_factory.go new file mode 100644 index 000000000000..f6fe0e8300d8 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/client_factory.go @@ -0,0 +1,109 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" +) + +// ClientFactory is a client factory used to create any client in this module. +// Don't use this type directly, use NewClientFactory instead. +type ClientFactory struct { + subscriptionID string + credential azcore.TokenCredential + options *arm.ClientOptions +} + +// NewClientFactory creates a new instance of ClientFactory with the specified values. +// The parameter values will be propagated to any client created from this factory. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewClientFactory(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*ClientFactory, error) { + _, err := arm.NewClient(moduleName+".ClientFactory", moduleVersion, credential, options) + if err != nil { + return nil, err + } + return &ClientFactory{ + subscriptionID: subscriptionID, credential: credential, + options: options.Clone(), + }, nil +} + +func (c *ClientFactory) NewGlobalRulestackClient() *GlobalRulestackClient { + subClient, _ := NewGlobalRulestackClient(c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewCertificateObjectGlobalRulestackClient() *CertificateObjectGlobalRulestackClient { + subClient, _ := NewCertificateObjectGlobalRulestackClient(c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewFqdnListGlobalRulestackClient() *FqdnListGlobalRulestackClient { + subClient, _ := NewFqdnListGlobalRulestackClient(c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewPostRulesClient() *PostRulesClient { + subClient, _ := NewPostRulesClient(c.credential, c.options) + 
return subClient +} + +func (c *ClientFactory) NewPrefixListGlobalRulestackClient() *PrefixListGlobalRulestackClient { + subClient, _ := NewPrefixListGlobalRulestackClient(c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewPreRulesClient() *PreRulesClient { + subClient, _ := NewPreRulesClient(c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewOperationsClient() *OperationsClient { + subClient, _ := NewOperationsClient(c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewFirewallsClient() *FirewallsClient { + subClient, _ := NewFirewallsClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewLocalRulestacksClient() *LocalRulestacksClient { + subClient, _ := NewLocalRulestacksClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewFirewallStatusClient() *FirewallStatusClient { + subClient, _ := NewFirewallStatusClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewCertificateObjectLocalRulestackClient() *CertificateObjectLocalRulestackClient { + subClient, _ := NewCertificateObjectLocalRulestackClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewFqdnListLocalRulestackClient() *FqdnListLocalRulestackClient { + subClient, _ := NewFqdnListLocalRulestackClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewLocalRulesClient() *LocalRulesClient { + subClient, _ := NewLocalRulesClient(c.subscriptionID, c.credential, c.options) + return subClient +} + +func (c *ClientFactory) NewPrefixListLocalRulestackClient() *PrefixListLocalRulestackClient { + subClient, _ := NewPrefixListLocalRulestackClient(c.subscriptionID, c.credential, c.options) + return subClient +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/constants.go 
b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/constants.go new file mode 100644 index 000000000000..69b6941017ed --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/constants.go @@ -0,0 +1,488 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +const ( + moduleName = "armpanngfw" + moduleVersion = "v0.1.0" +) + +type ActionEnum string + +const ( + ActionEnumAllow ActionEnum = "Allow" + ActionEnumDenyResetBoth ActionEnum = "DenyResetBoth" + ActionEnumDenyResetServer ActionEnum = "DenyResetServer" + ActionEnumDenySilent ActionEnum = "DenySilent" +) + +// PossibleActionEnumValues returns the possible values for the ActionEnum const type. +func PossibleActionEnumValues() []ActionEnum { + return []ActionEnum{ + ActionEnumAllow, + ActionEnumDenyResetBoth, + ActionEnumDenyResetServer, + ActionEnumDenySilent, + } +} + +// ActionType - Enum. Indicates the action type. "Internal" refers to actions that are for internal only APIs. +type ActionType string + +const ( + ActionTypeInternal ActionType = "Internal" +) + +// PossibleActionTypeValues returns the possible values for the ActionType const type. +func PossibleActionTypeValues() []ActionType { + return []ActionType{ + ActionTypeInternal, + } +} + +type AdvSecurityObjectTypeEnum string + +const ( + AdvSecurityObjectTypeEnumFeeds AdvSecurityObjectTypeEnum = "feeds" + AdvSecurityObjectTypeEnumURLCustom AdvSecurityObjectTypeEnum = "urlCustom" +) + +// PossibleAdvSecurityObjectTypeEnumValues returns the possible values for the AdvSecurityObjectTypeEnum const type. 
+func PossibleAdvSecurityObjectTypeEnumValues() []AdvSecurityObjectTypeEnum { + return []AdvSecurityObjectTypeEnum{ + AdvSecurityObjectTypeEnumFeeds, + AdvSecurityObjectTypeEnumURLCustom, + } +} + +// BillingCycle - Billing cycle +type BillingCycle string + +const ( + BillingCycleMONTHLY BillingCycle = "MONTHLY" + BillingCycleWEEKLY BillingCycle = "WEEKLY" +) + +// PossibleBillingCycleValues returns the possible values for the BillingCycle const type. +func PossibleBillingCycleValues() []BillingCycle { + return []BillingCycle{ + BillingCycleMONTHLY, + BillingCycleWEEKLY, + } +} + +// BooleanEnum - Boolean Enum +type BooleanEnum string + +const ( + BooleanEnumFALSE BooleanEnum = "FALSE" + BooleanEnumTRUE BooleanEnum = "TRUE" +) + +// PossibleBooleanEnumValues returns the possible values for the BooleanEnum const type. +func PossibleBooleanEnumValues() []BooleanEnum { + return []BooleanEnum{ + BooleanEnumFALSE, + BooleanEnumTRUE, + } +} + +// CreatedByType - The type of identity that created the resource. +type CreatedByType string + +const ( + CreatedByTypeApplication CreatedByType = "Application" + CreatedByTypeKey CreatedByType = "Key" + CreatedByTypeManagedIdentity CreatedByType = "ManagedIdentity" + CreatedByTypeUser CreatedByType = "User" +) + +// PossibleCreatedByTypeValues returns the possible values for the CreatedByType const type. +func PossibleCreatedByTypeValues() []CreatedByType { + return []CreatedByType{ + CreatedByTypeApplication, + CreatedByTypeKey, + CreatedByTypeManagedIdentity, + CreatedByTypeUser, + } +} + +// DNSProxy - DNS Proxy +type DNSProxy string + +const ( + DNSProxyDISABLED DNSProxy = "DISABLED" + DNSProxyENABLED DNSProxy = "ENABLED" +) + +// PossibleDNSProxyValues returns the possible values for the DNSProxy const type. 
+func PossibleDNSProxyValues() []DNSProxy { + return []DNSProxy{ + DNSProxyDISABLED, + DNSProxyENABLED, + } +} + +type DecryptionRuleTypeEnum string + +const ( + DecryptionRuleTypeEnumNone DecryptionRuleTypeEnum = "None" + DecryptionRuleTypeEnumSSLInboundInspection DecryptionRuleTypeEnum = "SSLInboundInspection" + DecryptionRuleTypeEnumSSLOutboundInspection DecryptionRuleTypeEnum = "SSLOutboundInspection" +) + +// PossibleDecryptionRuleTypeEnumValues returns the possible values for the DecryptionRuleTypeEnum const type. +func PossibleDecryptionRuleTypeEnumValues() []DecryptionRuleTypeEnum { + return []DecryptionRuleTypeEnum{ + DecryptionRuleTypeEnumNone, + DecryptionRuleTypeEnumSSLInboundInspection, + DecryptionRuleTypeEnumSSLOutboundInspection, + } +} + +// DefaultMode - Type for Default Mode for rules creation +type DefaultMode string + +const ( + DefaultModeFIREWALL DefaultMode = "FIREWALL" + DefaultModeIPS DefaultMode = "IPS" + DefaultModeNONE DefaultMode = "NONE" +) + +// PossibleDefaultModeValues returns the possible values for the DefaultMode const type. +func PossibleDefaultModeValues() []DefaultMode { + return []DefaultMode{ + DefaultModeFIREWALL, + DefaultModeIPS, + DefaultModeNONE, + } +} + +// EgressNat - Egress NAT +type EgressNat string + +const ( + EgressNatDISABLED EgressNat = "DISABLED" + EgressNatENABLED EgressNat = "ENABLED" +) + +// PossibleEgressNatValues returns the possible values for the EgressNat const type. +func PossibleEgressNatValues() []EgressNat { + return []EgressNat{ + EgressNatDISABLED, + EgressNatENABLED, + } +} + +// EnabledDNSType - Enabled DNS type values +type EnabledDNSType string + +const ( + EnabledDNSTypeAZURE EnabledDNSType = "AZURE" + EnabledDNSTypeCUSTOM EnabledDNSType = "CUSTOM" +) + +// PossibleEnabledDNSTypeValues returns the possible values for the EnabledDNSType const type. 
+func PossibleEnabledDNSTypeValues() []EnabledDNSType { + return []EnabledDNSType{ + EnabledDNSTypeAZURE, + EnabledDNSTypeCUSTOM, + } +} + +// HealthStatus - Status Codes for the Firewall +type HealthStatus string + +const ( + HealthStatusGREEN HealthStatus = "GREEN" + HealthStatusINITIALIZING HealthStatus = "INITIALIZING" + HealthStatusRED HealthStatus = "RED" + HealthStatusYELLOW HealthStatus = "YELLOW" +) + +// PossibleHealthStatusValues returns the possible values for the HealthStatus const type. +func PossibleHealthStatusValues() []HealthStatus { + return []HealthStatus{ + HealthStatusGREEN, + HealthStatusINITIALIZING, + HealthStatusRED, + HealthStatusYELLOW, + } +} + +// LogOption - Log options possible +type LogOption string + +const ( + LogOptionINDIVIDUALDESTINATION LogOption = "INDIVIDUAL_DESTINATION" + LogOptionSAMEDESTINATION LogOption = "SAME_DESTINATION" +) + +// PossibleLogOptionValues returns the possible values for the LogOption const type. +func PossibleLogOptionValues() []LogOption { + return []LogOption{ + LogOptionINDIVIDUALDESTINATION, + LogOptionSAMEDESTINATION, + } +} + +// LogType - Possible log types +type LogType string + +const ( + LogTypeAUDIT LogType = "AUDIT" + LogTypeDECRYPTION LogType = "DECRYPTION" + LogTypeDLP LogType = "DLP" + LogTypeTHREAT LogType = "THREAT" + LogTypeTRAFFIC LogType = "TRAFFIC" + LogTypeWILDFIRE LogType = "WILDFIRE" +) + +// PossibleLogTypeValues returns the possible values for the LogType const type. +func PossibleLogTypeValues() []LogType { + return []LogType{ + LogTypeAUDIT, + LogTypeDECRYPTION, + LogTypeDLP, + LogTypeTHREAT, + LogTypeTRAFFIC, + LogTypeWILDFIRE, + } +} + +// ManagedIdentityType - The kind of managed identity assigned to this resource. 
+type ManagedIdentityType string + +const ( + ManagedIdentityTypeNone ManagedIdentityType = "None" + ManagedIdentityTypeSystemAndUserAssigned ManagedIdentityType = "SystemAssigned,UserAssigned" + ManagedIdentityTypeSystemAssigned ManagedIdentityType = "SystemAssigned" + ManagedIdentityTypeUserAssigned ManagedIdentityType = "UserAssigned" +) + +// PossibleManagedIdentityTypeValues returns the possible values for the ManagedIdentityType const type. +func PossibleManagedIdentityTypeValues() []ManagedIdentityType { + return []ManagedIdentityType{ + ManagedIdentityTypeNone, + ManagedIdentityTypeSystemAndUserAssigned, + ManagedIdentityTypeSystemAssigned, + ManagedIdentityTypeUserAssigned, + } +} + +// MarketplaceSubscriptionStatus - Marketplace Subscription Status +type MarketplaceSubscriptionStatus string + +const ( + MarketplaceSubscriptionStatusFulfillmentRequested MarketplaceSubscriptionStatus = "FulfillmentRequested" + MarketplaceSubscriptionStatusNotStarted MarketplaceSubscriptionStatus = "NotStarted" + MarketplaceSubscriptionStatusPendingFulfillmentStart MarketplaceSubscriptionStatus = "PendingFulfillmentStart" + MarketplaceSubscriptionStatusSubscribed MarketplaceSubscriptionStatus = "Subscribed" + MarketplaceSubscriptionStatusSuspended MarketplaceSubscriptionStatus = "Suspended" + MarketplaceSubscriptionStatusUnsubscribed MarketplaceSubscriptionStatus = "Unsubscribed" +) + +// PossibleMarketplaceSubscriptionStatusValues returns the possible values for the MarketplaceSubscriptionStatus const type. 
+func PossibleMarketplaceSubscriptionStatusValues() []MarketplaceSubscriptionStatus { + return []MarketplaceSubscriptionStatus{ + MarketplaceSubscriptionStatusFulfillmentRequested, + MarketplaceSubscriptionStatusNotStarted, + MarketplaceSubscriptionStatusPendingFulfillmentStart, + MarketplaceSubscriptionStatusSubscribed, + MarketplaceSubscriptionStatusSuspended, + MarketplaceSubscriptionStatusUnsubscribed, + } +} + +// NetworkType - NetworkType Enum +type NetworkType string + +const ( + NetworkTypeVNET NetworkType = "VNET" + NetworkTypeVWAN NetworkType = "VWAN" +) + +// PossibleNetworkTypeValues returns the possible values for the NetworkType const type. +func PossibleNetworkTypeValues() []NetworkType { + return []NetworkType{ + NetworkTypeVNET, + NetworkTypeVWAN, + } +} + +// Origin - The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit logs UX. Default +// value is "user,system" +type Origin string + +const ( + OriginSystem Origin = "system" + OriginUser Origin = "user" + OriginUserSystem Origin = "user,system" +) + +// PossibleOriginValues returns the possible values for the Origin const type. +func PossibleOriginValues() []Origin { + return []Origin{ + OriginSystem, + OriginUser, + OriginUserSystem, + } +} + +// ProtocolType - Protocol Enum +type ProtocolType string + +const ( + ProtocolTypeTCP ProtocolType = "TCP" + ProtocolTypeUDP ProtocolType = "UDP" +) + +// PossibleProtocolTypeValues returns the possible values for the ProtocolType const type. +func PossibleProtocolTypeValues() []ProtocolType { + return []ProtocolType{ + ProtocolTypeTCP, + ProtocolTypeUDP, + } +} + +// ProvisioningState - Provisioning state of the firewall resource. 
+type ProvisioningState string + +const ( + ProvisioningStateAccepted ProvisioningState = "Accepted" + ProvisioningStateCanceled ProvisioningState = "Canceled" + ProvisioningStateCreating ProvisioningState = "Creating" + ProvisioningStateDeleted ProvisioningState = "Deleted" + ProvisioningStateDeleting ProvisioningState = "Deleting" + ProvisioningStateFailed ProvisioningState = "Failed" + ProvisioningStateNotSpecified ProvisioningState = "NotSpecified" + ProvisioningStateSucceeded ProvisioningState = "Succeeded" + ProvisioningStateUpdating ProvisioningState = "Updating" +) + +// PossibleProvisioningStateValues returns the possible values for the ProvisioningState const type. +func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{ + ProvisioningStateAccepted, + ProvisioningStateCanceled, + ProvisioningStateCreating, + ProvisioningStateDeleted, + ProvisioningStateDeleting, + ProvisioningStateFailed, + ProvisioningStateNotSpecified, + ProvisioningStateSucceeded, + ProvisioningStateUpdating, + } +} + +// ReadOnlyProvisioningState - Provisioning state of the firewall resource. +type ReadOnlyProvisioningState string + +const ( + ReadOnlyProvisioningStateDeleted ReadOnlyProvisioningState = "Deleted" + ReadOnlyProvisioningStateFailed ReadOnlyProvisioningState = "Failed" + ReadOnlyProvisioningStateSucceeded ReadOnlyProvisioningState = "Succeeded" +) + +// PossibleReadOnlyProvisioningStateValues returns the possible values for the ReadOnlyProvisioningState const type. +func PossibleReadOnlyProvisioningStateValues() []ReadOnlyProvisioningState { + return []ReadOnlyProvisioningState{ + ReadOnlyProvisioningStateDeleted, + ReadOnlyProvisioningStateFailed, + ReadOnlyProvisioningStateSucceeded, + } +} + +// ScopeType - Rulestack Type +type ScopeType string + +const ( + ScopeTypeGLOBAL ScopeType = "GLOBAL" + ScopeTypeLOCAL ScopeType = "LOCAL" +) + +// PossibleScopeTypeValues returns the possible values for the ScopeType const type. 
+func PossibleScopeTypeValues() []ScopeType { + return []ScopeType{ + ScopeTypeGLOBAL, + ScopeTypeLOCAL, + } +} + +type SecurityServicesTypeEnum string + +const ( + SecurityServicesTypeEnumAntiSpyware SecurityServicesTypeEnum = "antiSpyware" + SecurityServicesTypeEnumAntiVirus SecurityServicesTypeEnum = "antiVirus" + SecurityServicesTypeEnumDNSSubscription SecurityServicesTypeEnum = "dnsSubscription" + SecurityServicesTypeEnumFileBlocking SecurityServicesTypeEnum = "fileBlocking" + SecurityServicesTypeEnumIPsVulnerability SecurityServicesTypeEnum = "ipsVulnerability" + SecurityServicesTypeEnumURLFiltering SecurityServicesTypeEnum = "urlFiltering" +) + +// PossibleSecurityServicesTypeEnumValues returns the possible values for the SecurityServicesTypeEnum const type. +func PossibleSecurityServicesTypeEnumValues() []SecurityServicesTypeEnum { + return []SecurityServicesTypeEnum{ + SecurityServicesTypeEnumAntiSpyware, + SecurityServicesTypeEnumAntiVirus, + SecurityServicesTypeEnumDNSSubscription, + SecurityServicesTypeEnumFileBlocking, + SecurityServicesTypeEnumIPsVulnerability, + SecurityServicesTypeEnumURLFiltering, + } +} + +// ServerStatus - Connectivity Status for Panorama Server +type ServerStatus string + +const ( + ServerStatusDOWN ServerStatus = "DOWN" + ServerStatusUP ServerStatus = "UP" +) + +// PossibleServerStatusValues returns the possible values for the ServerStatus const type. +func PossibleServerStatusValues() []ServerStatus { + return []ServerStatus{ + ServerStatusDOWN, + ServerStatusUP, + } +} + +// StateEnum - Enabled or Disabled Enum +type StateEnum string + +const ( + StateEnumDISABLED StateEnum = "DISABLED" + StateEnumENABLED StateEnum = "ENABLED" +) + +// PossibleStateEnumValues returns the possible values for the StateEnum const type. 
+func PossibleStateEnumValues() []StateEnum { + return []StateEnum{ + StateEnumDISABLED, + StateEnumENABLED, + } +} + +// UsageType - Usage Type +type UsageType string + +const ( + UsageTypeCOMMITTED UsageType = "COMMITTED" + UsageTypePAYG UsageType = "PAYG" +) + +// PossibleUsageTypeValues returns the possible values for the UsageType const type. +func PossibleUsageTypeValues() []UsageType { + return []UsageType{ + UsageTypeCOMMITTED, + UsageTypePAYG, + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewalls_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewalls_client.go new file mode 100644 index 000000000000..5dacb5f2f900 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewalls_client.go @@ -0,0 +1,647 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// FirewallsClient contains the methods for the Firewalls group. +// Don't use this type directly, use NewFirewallsClient() instead. +type FirewallsClient struct { + internal *arm.Client + subscriptionID string +} + +// NewFirewallsClient creates a new instance of FirewallsClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. 
+// - options - pass nil to accept the default values. +func NewFirewallsClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FirewallsClient, error) { + cl, err := arm.NewClient(moduleName+".FirewallsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &FirewallsClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a FirewallResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - firewallName - Firewall resource name +// - resource - Resource create parameters. +// - options - FirewallsClientBeginCreateOrUpdateOptions contains the optional parameters for the FirewallsClient.BeginCreateOrUpdate +// method. +func (client *FirewallsClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, firewallName string, resource FirewallResource, options *FirewallsClientBeginCreateOrUpdateOptions) (*runtime.Poller[FirewallsClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, firewallName, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FirewallsClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[FirewallsClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a FirewallResource +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +func (client *FirewallsClient) createOrUpdate(ctx context.Context, resourceGroupName string, firewallName string, resource FirewallResource, options *FirewallsClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, firewallName, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *FirewallsClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, resource FirewallResource, options *FirewallsClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + 
req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a FirewallResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - firewallName - Firewall resource name +// - options - FirewallsClientBeginDeleteOptions contains the optional parameters for the FirewallsClient.BeginDelete method. +func (client *FirewallsClient) BeginDelete(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientBeginDeleteOptions) (*runtime.Poller[FirewallsClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, firewallName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FirewallsClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[FirewallsClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a FirewallResource +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +func (client *FirewallsClient) deleteOperation(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, firewallName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *FirewallsClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a 
FirewallResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - firewallName - Firewall resource name +// - options - FirewallsClientGetOptions contains the optional parameters for the FirewallsClient.Get method. +func (client *FirewallsClient) Get(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientGetOptions) (FirewallsClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, firewallName, options) + if err != nil { + return FirewallsClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallsClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FirewallsClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *FirewallsClient) getCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *FirewallsClient) getHandleResponse(resp *http.Response) (FirewallsClientGetResponse, error) { + result := FirewallsClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FirewallResource); err != nil { + return FirewallsClientGetResponse{}, err + } + return result, nil +} + +// GetGlobalRulestack - Get Global Rulestack associated with the Firewall +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - firewallName - Firewall resource name +// - options - FirewallsClientGetGlobalRulestackOptions contains the optional parameters for the FirewallsClient.GetGlobalRulestack +// method. +func (client *FirewallsClient) GetGlobalRulestack(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientGetGlobalRulestackOptions) (FirewallsClientGetGlobalRulestackResponse, error) { + req, err := client.getGlobalRulestackCreateRequest(ctx, resourceGroupName, firewallName, options) + if err != nil { + return FirewallsClientGetGlobalRulestackResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallsClientGetGlobalRulestackResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FirewallsClientGetGlobalRulestackResponse{}, runtime.NewResponseError(resp) + } + return client.getGlobalRulestackHandleResponse(resp) +} + +// getGlobalRulestackCreateRequest creates the GetGlobalRulestack request. +func (client *FirewallsClient) getGlobalRulestackCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientGetGlobalRulestackOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}/getGlobalRulestack" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := 
runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getGlobalRulestackHandleResponse handles the GetGlobalRulestack response. +func (client *FirewallsClient) getGlobalRulestackHandleResponse(resp *http.Response) (FirewallsClientGetGlobalRulestackResponse, error) { + result := FirewallsClientGetGlobalRulestackResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GlobalRulestackInfo); err != nil { + return FirewallsClientGetGlobalRulestackResponse{}, err + } + return result, nil +} + +// GetLogProfile - Log Profile for Firewall +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - firewallName - Firewall resource name +// - options - FirewallsClientGetLogProfileOptions contains the optional parameters for the FirewallsClient.GetLogProfile method. +func (client *FirewallsClient) GetLogProfile(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientGetLogProfileOptions) (FirewallsClientGetLogProfileResponse, error) { + req, err := client.getLogProfileCreateRequest(ctx, resourceGroupName, firewallName, options) + if err != nil { + return FirewallsClientGetLogProfileResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallsClientGetLogProfileResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FirewallsClientGetLogProfileResponse{}, runtime.NewResponseError(resp) + } + return client.getLogProfileHandleResponse(resp) +} + +// getLogProfileCreateRequest creates the GetLogProfile request. 
+func (client *FirewallsClient) getLogProfileCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientGetLogProfileOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}/getLogProfile" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getLogProfileHandleResponse handles the GetLogProfile response. +func (client *FirewallsClient) getLogProfileHandleResponse(resp *http.Response) (FirewallsClientGetLogProfileResponse, error) { + result := FirewallsClientGetLogProfileResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.LogSettings); err != nil { + return FirewallsClientGetLogProfileResponse{}, err + } + return result, nil +} + +// GetSupportInfo - support info for firewall. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - firewallName - Firewall resource name +// - options - FirewallsClientGetSupportInfoOptions contains the optional parameters for the FirewallsClient.GetSupportInfo +// method. +func (client *FirewallsClient) GetSupportInfo(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientGetSupportInfoOptions) (FirewallsClientGetSupportInfoResponse, error) { + req, err := client.getSupportInfoCreateRequest(ctx, resourceGroupName, firewallName, options) + if err != nil { + return FirewallsClientGetSupportInfoResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallsClientGetSupportInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FirewallsClientGetSupportInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getSupportInfoHandleResponse(resp) +} + +// getSupportInfoCreateRequest creates the GetSupportInfo request. +func (client *FirewallsClient) getSupportInfoCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientGetSupportInfoOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}/getSupportInfo" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, 
runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Email != nil { + reqQP.Set("email", *options.Email) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getSupportInfoHandleResponse handles the GetSupportInfo response. +func (client *FirewallsClient) getSupportInfoHandleResponse(resp *http.Response) (FirewallsClientGetSupportInfoResponse, error) { + result := FirewallsClientGetSupportInfoResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SupportInfo); err != nil { + return FirewallsClientGetSupportInfoResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - List FirewallResource resources by resource group +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - options - FirewallsClientListByResourceGroupOptions contains the optional parameters for the FirewallsClient.NewListByResourceGroupPager +// method. 
+func (client *FirewallsClient) NewListByResourceGroupPager(resourceGroupName string, options *FirewallsClientListByResourceGroupOptions) *runtime.Pager[FirewallsClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[FirewallsClientListByResourceGroupResponse]{ + More: func(page FirewallsClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FirewallsClientListByResourceGroupResponse) (FirewallsClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FirewallsClientListByResourceGroupResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallsClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FirewallsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. 
+func (client *FirewallsClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *FirewallsClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. +func (client *FirewallsClient) listByResourceGroupHandleResponse(resp *http.Response) (FirewallsClientListByResourceGroupResponse, error) { + result := FirewallsClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FirewallResourceListResult); err != nil { + return FirewallsClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// NewListBySubscriptionPager - List FirewallResource resources by subscription ID +// +// Generated from API version 2022-08-29-preview +// - options - FirewallsClientListBySubscriptionOptions contains the optional parameters for the FirewallsClient.NewListBySubscriptionPager +// method. 
+func (client *FirewallsClient) NewListBySubscriptionPager(options *FirewallsClientListBySubscriptionOptions) *runtime.Pager[FirewallsClientListBySubscriptionResponse] { + return runtime.NewPager(runtime.PagingHandler[FirewallsClientListBySubscriptionResponse]{ + More: func(page FirewallsClientListBySubscriptionResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FirewallsClientListBySubscriptionResponse) (FirewallsClientListBySubscriptionResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listBySubscriptionCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FirewallsClientListBySubscriptionResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallsClientListBySubscriptionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FirewallsClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) + } + return client.listBySubscriptionHandleResponse(resp) + }, + }) +} + +// listBySubscriptionCreateRequest creates the ListBySubscription request. 
+func (client *FirewallsClient) listBySubscriptionCreateRequest(ctx context.Context, options *FirewallsClientListBySubscriptionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/PaloAltoNetworks.Cloudngfw/firewalls" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listBySubscriptionHandleResponse handles the ListBySubscription response. +func (client *FirewallsClient) listBySubscriptionHandleResponse(resp *http.Response) (FirewallsClientListBySubscriptionResponse, error) { + result := FirewallsClientListBySubscriptionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FirewallResourceListResult); err != nil { + return FirewallsClientListBySubscriptionResponse{}, err + } + return result, nil +} + +// SaveLogProfile - Log Profile for Firewall +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - firewallName - Firewall resource name +// - options - FirewallsClientSaveLogProfileOptions contains the optional parameters for the FirewallsClient.SaveLogProfile +// method. 
+func (client *FirewallsClient) SaveLogProfile(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientSaveLogProfileOptions) (FirewallsClientSaveLogProfileResponse, error) { + req, err := client.saveLogProfileCreateRequest(ctx, resourceGroupName, firewallName, options) + if err != nil { + return FirewallsClientSaveLogProfileResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallsClientSaveLogProfileResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return FirewallsClientSaveLogProfileResponse{}, runtime.NewResponseError(resp) + } + return FirewallsClientSaveLogProfileResponse{}, nil +} + +// saveLogProfileCreateRequest creates the SaveLogProfile request. +func (client *FirewallsClient) saveLogProfileCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallsClientSaveLogProfileOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}/saveLogProfile" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + 
req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + if options != nil && options.LogSettings != nil { + return req, runtime.MarshalAsJSON(req, *options.LogSettings) + } + return req, nil +} + +// Update - Update a FirewallResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - firewallName - Firewall resource name +// - properties - The resource properties to be updated. +// - options - FirewallsClientUpdateOptions contains the optional parameters for the FirewallsClient.Update method. +func (client *FirewallsClient) Update(ctx context.Context, resourceGroupName string, firewallName string, properties FirewallResourceUpdate, options *FirewallsClientUpdateOptions) (FirewallsClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, firewallName, properties, options) + if err != nil { + return FirewallsClientUpdateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallsClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FirewallsClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. 
+func (client *FirewallsClient) updateCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, properties FirewallResourceUpdate, options *FirewallsClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, properties) +} + +// updateHandleResponse handles the Update response. 
+func (client *FirewallsClient) updateHandleResponse(resp *http.Response) (FirewallsClientUpdateResponse, error) { + result := FirewallsClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FirewallResource); err != nil { + return FirewallsClientUpdateResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewalls_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewalls_client_example_test.go new file mode 100644 index 000000000000..ea7a1961f110 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewalls_client_example_test.go @@ -0,0 +1,1734 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_ListBySubscription_MaximumSet_Gen.json +func ExampleFirewallsClient_NewListBySubscriptionPager_firewallsListBySubscriptionMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewFirewallsClient().NewListBySubscriptionPager(nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.FirewallResourceListResult = armpanngfw.FirewallResourceListResult{ + // Value: []*armpanngfw.FirewallResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // TenantID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // AssociatedRulestack: &armpanngfw.RulestackDetails{ + // Location: to.Ptr("eastus"), + // ResourceID: to.Ptr("aaaaaaaaaa"), + // RulestackID: to.Ptr("aaaaaaaaaaaaaaaa"), + // }, + // DNSSettings: &armpanngfw.DNSSettings{ + // DNSServers: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.111"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // EnableDNSProxy: to.Ptr(armpanngfw.DNSProxyDISABLED), + // EnabledDNSType: 
to.Ptr(armpanngfw.EnabledDNSTypeCUSTOM), + // }, + // FrontEndSettings: []*armpanngfw.FrontendSetting{ + // { + // Name: to.Ptr("frontendsetting11"), + // BackendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.32.136"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp2"), + // }, + // Port: to.Ptr("80"), + // }, + // FrontendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.91.251"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp1"), + // }, + // Port: to.Ptr("80"), + // }, + // Protocol: to.Ptr(armpanngfw.ProtocolTypeTCP), + // }}, + // IsPanoramaManaged: to.Ptr(armpanngfw.BooleanEnumTRUE), + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // MarketplaceSubscriptionID: to.Ptr("aa"), + // MarketplaceSubscriptionStatus: to.Ptr(armpanngfw.MarketplaceSubscriptionStatusPendingFulfillmentStart), + // OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EgressNatIP: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.111"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: 
to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // VnetConfiguration: &armpanngfw.VnetConfiguration{ + // IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // TrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // Vnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.0.0/16"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet"), + // }, + // }, + // VwanConfiguration: &armpanngfw.VwanConfiguration{ + // IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // NetworkVirtualApplianceID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // TrustSubnet: 
&armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // VHub: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // }, + // }, + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanoramaConfig: &armpanngfw.PanoramaConfig{ + // CgName: to.Ptr("PanoramaCollectorGroup"), + // ConfigString: to.Ptr("bas64EncodedString"), + // DgName: to.Ptr("PanoramaDeviceGroup"), + // HostName: to.Ptr("hostname"), + // PanoramaServer: to.Ptr("10.25.1.1"), + // PanoramaServer2: to.Ptr("10.20.1.1"), + // TplName: to.Ptr("PanoramaTemplateStack"), + // VMAuthKey: to.Ptr("SSH_AUTH_KEY"), + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // EffectiveDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-13T00:46:05.283Z"); return t}()), + // PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UsageType: to.Ptr(armpanngfw.UsageTypePAYG), + // }, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // }}, + // } + } +} + +// Generated from example definition: 
https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_ListBySubscription_MinimumSet_Gen.json +func ExampleFirewallsClient_NewListBySubscriptionPager_firewallsListBySubscriptionMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewFirewallsClient().NewListBySubscriptionPager(nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.FirewallResourceListResult = armpanngfw.FirewallResourceListResult{ + // Value: []*armpanngfw.FirewallResource{ + // { + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/providers/PaloAltoNetworks.Cloudngfw/firewalls/firewall"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // DNSSettings: &armpanngfw.DNSSettings{ + // }, + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_ListByResourceGroup_MaximumSet_Gen.json +func ExampleFirewallsClient_NewListByResourceGroupPager_firewallsListByResourceGroupMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := 
clientFactory.NewFirewallsClient().NewListByResourceGroupPager("firewall-rg", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // page.FirewallResourceListResult = armpanngfw.FirewallResourceListResult{ + // Value: []*armpanngfw.FirewallResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // TenantID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // AssociatedRulestack: &armpanngfw.RulestackDetails{ + // 
Location: to.Ptr("eastus"), + // ResourceID: to.Ptr("aaaaaaaaaa"), + // RulestackID: to.Ptr("aaaaaaaaaaaaaaaa"), + // }, + // DNSSettings: &armpanngfw.DNSSettings{ + // DNSServers: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.111"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // EnableDNSProxy: to.Ptr(armpanngfw.DNSProxyDISABLED), + // EnabledDNSType: to.Ptr(armpanngfw.EnabledDNSTypeCUSTOM), + // }, + // FrontEndSettings: []*armpanngfw.FrontendSetting{ + // { + // Name: to.Ptr("frontendsetting11"), + // BackendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.32.136"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp2"), + // }, + // Port: to.Ptr("80"), + // }, + // FrontendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.91.251"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp1"), + // }, + // Port: to.Ptr("80"), + // }, + // Protocol: to.Ptr(armpanngfw.ProtocolTypeTCP), + // }}, + // IsPanoramaManaged: to.Ptr(armpanngfw.BooleanEnumTRUE), + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // MarketplaceSubscriptionID: to.Ptr("aa"), + // MarketplaceSubscriptionStatus: to.Ptr(armpanngfw.MarketplaceSubscriptionStatusPendingFulfillmentStart), + // OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EgressNatIP: []*armpanngfw.IPAddress{ + // { + // Address: 
to.Ptr("20.22.92.111"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // VnetConfiguration: &armpanngfw.VnetConfiguration{ + // IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // TrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // Vnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.0.0/16"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet"), + // }, + // }, + // VwanConfiguration: &armpanngfw.VwanConfiguration{ 
+ // IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // NetworkVirtualApplianceID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // TrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // VHub: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // }, + // }, + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanoramaConfig: &armpanngfw.PanoramaConfig{ + // CgName: to.Ptr("PanoramaCollectorGroup"), + // ConfigString: to.Ptr("bas64EncodedString"), + // DgName: to.Ptr("PanoramaDeviceGroup"), + // HostName: to.Ptr("hostname"), + // PanoramaServer: to.Ptr("10.25.1.1"), + // PanoramaServer2: to.Ptr("10.20.1.1"), + // TplName: to.Ptr("PanoramaTemplateStack"), + // VMAuthKey: to.Ptr("SSH_AUTH_KEY"), + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // EffectiveDate: to.Ptr(func() time.Time { 
t, _ := time.Parse(time.RFC3339Nano, "2022-09-13T00:46:05.283Z"); return t}()), + // PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UsageType: to.Ptr(armpanngfw.UsageTypePAYG), + // }, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_ListByResourceGroup_MinimumSet_Gen.json +func ExampleFirewallsClient_NewListByResourceGroupPager_firewallsListByResourceGroupMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewFirewallsClient().NewListByResourceGroupPager("firewall-rg", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.FirewallResourceListResult = armpanngfw.FirewallResourceListResult{ + // Value: []*armpanngfw.FirewallResource{ + // { + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/firewalls/firewall"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // DNSSettings: &armpanngfw.DNSSettings{ + // }, + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_Get_MaximumSet_Gen.json +func ExampleFirewallsClient_Get_firewallsGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().Get(ctx, "firewall-rg", "firewall1", nil) + if err != 
nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FirewallResource = armpanngfw.FirewallResource{ + // Name: to.Ptr("aaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // TenantID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // AssociatedRulestack: &armpanngfw.RulestackDetails{ + // Location: to.Ptr("eastus"), + // ResourceID: to.Ptr("aaaaaaaaaa"), + // RulestackID: to.Ptr("aaaaaaaaaaaaaaaa"), + // }, + // DNSSettings: &armpanngfw.DNSSettings{ + // DNSServers: []*armpanngfw.IPAddress{ + // { + // Address: 
to.Ptr("20.22.92.111"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // EnableDNSProxy: to.Ptr(armpanngfw.DNSProxyDISABLED), + // EnabledDNSType: to.Ptr(armpanngfw.EnabledDNSTypeCUSTOM), + // }, + // FrontEndSettings: []*armpanngfw.FrontendSetting{ + // { + // Name: to.Ptr("frontendsetting11"), + // BackendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.32.136"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp2"), + // }, + // Port: to.Ptr("80"), + // }, + // FrontendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.91.251"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp1"), + // }, + // Port: to.Ptr("80"), + // }, + // Protocol: to.Ptr(armpanngfw.ProtocolTypeTCP), + // }}, + // IsPanoramaManaged: to.Ptr(armpanngfw.BooleanEnumTRUE), + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // MarketplaceSubscriptionID: to.Ptr("aa"), + // MarketplaceSubscriptionStatus: to.Ptr(armpanngfw.MarketplaceSubscriptionStatusPendingFulfillmentStart), + // OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EgressNatIP: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.111"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // 
EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // VnetConfiguration: &armpanngfw.VnetConfiguration{ + // IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // TrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // Vnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.0.0/16"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet"), + // }, + // }, + // VwanConfiguration: &armpanngfw.VwanConfiguration{ + // IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: 
to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // NetworkVirtualApplianceID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // TrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // VHub: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // }, + // }, + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanoramaConfig: &armpanngfw.PanoramaConfig{ + // CgName: to.Ptr("PanoramaCollectorGroup"), + // ConfigString: to.Ptr("bas64EncodedString"), + // DgName: to.Ptr("PanoramaDeviceGroup"), + // HostName: to.Ptr("hostname"), + // PanoramaServer: to.Ptr("10.25.1.1"), + // PanoramaServer2: to.Ptr("10.20.1.1"), + // TplName: to.Ptr("PanoramaTemplateStack"), + // VMAuthKey: to.Ptr("SSH_AUTH_KEY"), + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // EffectiveDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-13T00:46:05.283Z"); return t}()), + // PlanID: 
to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UsageType: to.Ptr(armpanngfw.UsageTypePAYG), + // }, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_Get_MinimumSet_Gen.json +func ExampleFirewallsClient_Get_firewallsGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().Get(ctx, "firewall-rg", "firewall1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.FirewallResource = armpanngfw.FirewallResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/firewalls/firewall1"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // DNSSettings: &armpanngfw.DNSSettings{ + // }, + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_CreateOrUpdate_MaximumSet_Gen.json +func ExampleFirewallsClient_BeginCreateOrUpdate_firewallsCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFirewallsClient().BeginCreateOrUpdate(ctx, "firewall-rg", "firewall1", armpanngfw.FirewallResource{ + Location: 
to.Ptr("eastus"), + Tags: map[string]*string{ + "tagName": to.Ptr("value"), + }, + Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + "key16": { + ClientID: to.Ptr("aaaa"), + PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + }, + }, + }, + Properties: &armpanngfw.FirewallDeploymentProperties{ + AssociatedRulestack: &armpanngfw.RulestackDetails{ + Location: to.Ptr("eastus"), + ResourceID: to.Ptr("lrs1"), + RulestackID: to.Ptr("PANRSID"), + }, + DNSSettings: &armpanngfw.DNSSettings{ + DNSServers: []*armpanngfw.IPAddress{ + { + Address: to.Ptr("20.22.92.111"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + }}, + EnableDNSProxy: to.Ptr(armpanngfw.DNSProxyDISABLED), + EnabledDNSType: to.Ptr(armpanngfw.EnabledDNSTypeCUSTOM), + }, + FrontEndSettings: []*armpanngfw.FrontendSetting{ + { + Name: to.Ptr("frontendsetting11"), + BackendConfiguration: &armpanngfw.EndpointConfiguration{ + Address: &armpanngfw.IPAddress{ + Address: to.Ptr("20.22.32.136"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp2"), + }, + Port: to.Ptr("80"), + }, + FrontendConfiguration: &armpanngfw.EndpointConfiguration{ + Address: &armpanngfw.IPAddress{ + Address: to.Ptr("20.22.91.251"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp1"), + }, + Port: to.Ptr("80"), + }, + Protocol: to.Ptr(armpanngfw.ProtocolTypeTCP), + }}, + IsPanoramaManaged: to.Ptr(armpanngfw.BooleanEnumTRUE), + MarketplaceDetails: 
&armpanngfw.MarketplaceDetails{ + MarketplaceSubscriptionStatus: to.Ptr(armpanngfw.MarketplaceSubscriptionStatusPendingFulfillmentStart), + OfferID: to.Ptr("liftr-pan-ame-test"), + PublisherID: to.Ptr("isvtestuklegacy"), + }, + NetworkProfile: &armpanngfw.NetworkProfile{ + EgressNatIP: []*armpanngfw.IPAddress{ + { + Address: to.Ptr("20.22.92.111"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + }}, + EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + PublicIPs: []*armpanngfw.IPAddress{ + { + Address: to.Ptr("20.22.92.11"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + }}, + VnetConfiguration: &armpanngfw.VnetConfiguration{ + IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + Address: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + TrustSubnet: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + }, + UnTrustSubnet: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + Vnet: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.0.0/16"), + 
ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet"), + }, + }, + VwanConfiguration: &armpanngfw.VwanConfiguration{ + IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + Address: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + NetworkVirtualApplianceID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + TrustSubnet: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + }, + UnTrustSubnet: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + VHub: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + }, + }, + PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + PanoramaConfig: &armpanngfw.PanoramaConfig{ + ConfigString: to.Ptr("bas64EncodedString"), + }, + PlanData: &armpanngfw.PlanData{ + BillingCycle: to.Ptr(armpanngfw.BillingCycleMONTHLY), + PlanID: to.Ptr("liftrpantestplan"), + UsageType: to.Ptr(armpanngfw.UsageTypePAYG), + }, + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + }, + }, nil) + 
if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FirewallResource = armpanngfw.FirewallResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("firewalls"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/firewalls/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // TenantID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // AssociatedRulestack: &armpanngfw.RulestackDetails{ + // Location: to.Ptr("eastus"), + // ResourceID: 
to.Ptr("aaaaaaaaaa"), + // RulestackID: to.Ptr("aaaaaaaaaaaaaaaa"), + // }, + // DNSSettings: &armpanngfw.DNSSettings{ + // DNSServers: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.111"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // EnableDNSProxy: to.Ptr(armpanngfw.DNSProxyDISABLED), + // EnabledDNSType: to.Ptr(armpanngfw.EnabledDNSTypeCUSTOM), + // }, + // FrontEndSettings: []*armpanngfw.FrontendSetting{ + // { + // Name: to.Ptr("frontendsetting11"), + // BackendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.32.136"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp2"), + // }, + // Port: to.Ptr("80"), + // }, + // FrontendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.91.251"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp1"), + // }, + // Port: to.Ptr("80"), + // }, + // Protocol: to.Ptr(armpanngfw.ProtocolTypeTCP), + // }}, + // IsPanoramaManaged: to.Ptr(armpanngfw.BooleanEnumTRUE), + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // MarketplaceSubscriptionID: to.Ptr("aa"), + // MarketplaceSubscriptionStatus: to.Ptr(armpanngfw.MarketplaceSubscriptionStatusPendingFulfillmentStart), + // OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EgressNatIP: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.111"), + // ResourceID: 
to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // VnetConfiguration: &armpanngfw.VnetConfiguration{ + // IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // TrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // Vnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.0.0/16"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet"), + // }, + // }, + // VwanConfiguration: &armpanngfw.VwanConfiguration{ + // IPOfTrustSubnetForUdr: 
&armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // NetworkVirtualApplianceID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // TrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // VHub: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // }, + // }, + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanoramaConfig: &armpanngfw.PanoramaConfig{ + // CgName: to.Ptr("PanoramaCollectorGroup"), + // ConfigString: to.Ptr("bas64EncodedString"), + // DgName: to.Ptr("PanoramaDeviceGroup"), + // HostName: to.Ptr("hostname"), + // PanoramaServer: to.Ptr("10.25.1.1"), + // PanoramaServer2: to.Ptr("10.20.1.1"), + // TplName: to.Ptr("PanoramaTemplateStack"), + // VMAuthKey: to.Ptr("SSH_AUTH_KEY"), + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // EffectiveDate: to.Ptr(func() time.Time { t, _ := 
time.Parse(time.RFC3339Nano, "2022-09-13T00:46:05.283Z"); return t}()), + // PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UsageType: to.Ptr(armpanngfw.UsageTypePAYG), + // }, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_CreateOrUpdate_MinimumSet_Gen.json +func ExampleFirewallsClient_BeginCreateOrUpdate_firewallsCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFirewallsClient().BeginCreateOrUpdate(ctx, "firewall-rg", "firewall1", armpanngfw.FirewallResource{ + Location: to.Ptr("eastus"), + Properties: &armpanngfw.FirewallDeploymentProperties{ + DNSSettings: &armpanngfw.DNSSettings{}, + MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + OfferID: to.Ptr("liftr-pan-ame-test"), + PublisherID: to.Ptr("isvtestuklegacy"), + }, + NetworkProfile: &armpanngfw.NetworkProfile{ + EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + PublicIPs: []*armpanngfw.IPAddress{ + { + Address: to.Ptr("20.22.92.11"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + }}, + }, + PlanData: &armpanngfw.PlanData{ + BillingCycle: to.Ptr(armpanngfw.BillingCycleMONTHLY), + PlanID: to.Ptr("liftrpantestplan"), + }, + }, + }, nil) + if err != nil { + 
log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FirewallResource = armpanngfw.FirewallResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/firewalls/firewall1"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // DNSSettings: &armpanngfw.DNSSettings{ + // }, + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_Update_MaximumSet_Gen.json +func ExampleFirewallsClient_Update_firewallsUpdateMaximumSetGen() { + 
cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().Update(ctx, "firewall-rg", "firewall1", armpanngfw.FirewallResourceUpdate{ + Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + "key16": { + ClientID: to.Ptr("aaaa"), + PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + }, + }, + }, + Properties: &armpanngfw.FirewallResourceUpdateProperties{ + AssociatedRulestack: &armpanngfw.RulestackDetails{ + Location: to.Ptr("eastus"), + ResourceID: to.Ptr("aaaaaaaaaa"), + RulestackID: to.Ptr("aaaaaaaaaaaaaaaa"), + }, + DNSSettings: &armpanngfw.DNSSettings{ + DNSServers: []*armpanngfw.IPAddress{ + { + Address: to.Ptr("20.22.92.111"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + }}, + EnableDNSProxy: to.Ptr(armpanngfw.DNSProxyDISABLED), + EnabledDNSType: to.Ptr(armpanngfw.EnabledDNSTypeCUSTOM), + }, + FrontEndSettings: []*armpanngfw.FrontendSetting{ + { + Name: to.Ptr("frontendsetting11"), + BackendConfiguration: &armpanngfw.EndpointConfiguration{ + Address: &armpanngfw.IPAddress{ + Address: to.Ptr("20.22.32.136"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp2"), + }, + Port: to.Ptr("80"), + }, + FrontendConfiguration: &armpanngfw.EndpointConfiguration{ + Address: &armpanngfw.IPAddress{ + Address: to.Ptr("20.22.91.251"), + ResourceID: 
to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp1"), + }, + Port: to.Ptr("80"), + }, + Protocol: to.Ptr(armpanngfw.ProtocolTypeTCP), + }}, + IsPanoramaManaged: to.Ptr(armpanngfw.BooleanEnumTRUE), + MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + MarketplaceSubscriptionStatus: to.Ptr(armpanngfw.MarketplaceSubscriptionStatusPendingFulfillmentStart), + OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + PublisherID: to.Ptr("aaaa"), + }, + NetworkProfile: &armpanngfw.NetworkProfile{ + EgressNatIP: []*armpanngfw.IPAddress{ + { + Address: to.Ptr("20.22.92.111"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + }}, + EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + PublicIPs: []*armpanngfw.IPAddress{ + { + Address: to.Ptr("20.22.92.11"), + ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + }}, + VnetConfiguration: &armpanngfw.VnetConfiguration{ + IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + Address: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + TrustSubnet: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + }, + UnTrustSubnet: 
&armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + Vnet: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.0.0/16"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet"), + }, + }, + VwanConfiguration: &armpanngfw.VwanConfiguration{ + IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + Address: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + NetworkVirtualApplianceID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + TrustSubnet: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + }, + UnTrustSubnet: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + VHub: &armpanngfw.IPAddressSpace{ + AddressSpace: to.Ptr("10.1.1.0/24"), + ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + }, + }, + }, + PanEtag: 
to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + PanoramaConfig: &armpanngfw.PanoramaConfig{ + ConfigString: to.Ptr("bas64EncodedString"), + }, + PlanData: &armpanngfw.PlanData{ + BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + UsageType: to.Ptr(armpanngfw.UsageTypePAYG), + }, + }, + Tags: map[string]*string{ + "tagName": to.Ptr("value"), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FirewallResource = armpanngfw.FirewallResource{ + // Name: to.Ptr("aaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // TenantID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: 
to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // AssociatedRulestack: &armpanngfw.RulestackDetails{ + // Location: to.Ptr("eastus"), + // ResourceID: to.Ptr("aaaaaaaaaa"), + // RulestackID: to.Ptr("aaaaaaaaaaaaaaaa"), + // }, + // DNSSettings: &armpanngfw.DNSSettings{ + // DNSServers: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.111"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // EnableDNSProxy: to.Ptr(armpanngfw.DNSProxyDISABLED), + // EnabledDNSType: to.Ptr(armpanngfw.EnabledDNSTypeCUSTOM), + // }, + // FrontEndSettings: []*armpanngfw.FrontendSetting{ + // { + // Name: to.Ptr("frontendsetting11"), + // BackendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.32.136"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp2"), + // }, + // Port: to.Ptr("80"), + // }, + // FrontendConfiguration: &armpanngfw.EndpointConfiguration{ + // Address: &armpanngfw.IPAddress{ + // Address: to.Ptr("20.22.91.251"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-frontendSettingIp1"), + // }, + // Port: to.Ptr("80"), + // }, + // Protocol: to.Ptr(armpanngfw.ProtocolTypeTCP), + // }}, + // IsPanoramaManaged: to.Ptr(armpanngfw.BooleanEnumTRUE), + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // MarketplaceSubscriptionID: to.Ptr("aa"), + // MarketplaceSubscriptionStatus: to.Ptr(armpanngfw.MarketplaceSubscriptionStatusPendingFulfillmentStart), + // OfferID: 
to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EgressNatIP: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.111"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-egressNatIp1"), + // }}, + // EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // VnetConfiguration: &armpanngfw.VnetConfiguration{ + // IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // TrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // Vnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.0.0/16"), + // ResourceID: 
to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet"), + // }, + // }, + // VwanConfiguration: &armpanngfw.VwanConfiguration{ + // IPOfTrustSubnetForUdr: &armpanngfw.IPAddress{ + // Address: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // NetworkVirtualApplianceID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // TrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-trust-subnet"), + // }, + // UnTrustSubnet: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // VHub: &armpanngfw.IPAddressSpace{ + // AddressSpace: to.Ptr("10.1.1.0/24"), + // ResourceID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/os-liftr-integration/providers/Microsoft.Network/virtualNetworks/os-liftr-integration-vnet/subnets/os-liftr-integration-untrust-subnet"), + // }, + // }, + // }, + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanoramaConfig: &armpanngfw.PanoramaConfig{ + // CgName: to.Ptr("PanoramaCollectorGroup"), + // ConfigString: to.Ptr("bas64EncodedString"), + // DgName: to.Ptr("PanoramaDeviceGroup"), + // HostName: to.Ptr("hostname"), + // PanoramaServer: to.Ptr("10.25.1.1"), + // PanoramaServer2: 
to.Ptr("10.20.1.1"), + // TplName: to.Ptr("PanoramaTemplateStack"), + // VMAuthKey: to.Ptr("SSH_AUTH_KEY"), + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // EffectiveDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-13T00:46:05.283Z"); return t}()), + // PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UsageType: to.Ptr(armpanngfw.UsageTypePAYG), + // }, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_Update_MinimumSet_Gen.json +func ExampleFirewallsClient_Update_firewallsUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().Update(ctx, "firewall-rg", "firewall1", armpanngfw.FirewallResourceUpdate{}, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.FirewallResource = armpanngfw.FirewallResource{ + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.FirewallDeploymentProperties{ + // DNSSettings: &armpanngfw.DNSSettings{ + // }, + // MarketplaceDetails: &armpanngfw.MarketplaceDetails{ + // OfferID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // PublisherID: to.Ptr("aaaa"), + // }, + // NetworkProfile: &armpanngfw.NetworkProfile{ + // EnableEgressNat: to.Ptr(armpanngfw.EgressNatENABLED), + // NetworkType: to.Ptr(armpanngfw.NetworkTypeVNET), + // PublicIPs: []*armpanngfw.IPAddress{ + // { + // Address: to.Ptr("20.22.92.11"), + // ResourceID: to.Ptr("/subscriptions/01c7d41f-afaf-464e-8a8b-5c6f9f98cee8/resourceGroups/mj-liftr-integration/providers/Microsoft.Network/publicIPAddresses/mj-liftr-integration-PublicIp1"), + // }}, + // }, + // PlanData: &armpanngfw.PlanData{ + // BillingCycle: to.Ptr(armpanngfw.BillingCycleWEEKLY), + // PlanID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_Delete_MaximumSet_Gen.json +func ExampleFirewallsClient_BeginDelete_firewallsDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFirewallsClient().BeginDelete(ctx, "firewall-rg", "firewall1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example 
definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_Delete_MinimumSet_Gen.json +func ExampleFirewallsClient_BeginDelete_firewallsDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFirewallsClient().BeginDelete(ctx, "firewall-rg", "firewall1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_getGlobalRulestack_MaximumSet_Gen.json +func ExampleFirewallsClient_GetGlobalRulestack_firewallsGetGlobalRulestackMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().GetGlobalRulestack(ctx, "firewall-rg", "firewall1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. 
+ _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.GlobalRulestackInfo = armpanngfw.GlobalRulestackInfo{ + // AzureID: to.Ptr("aaaaaaaaaa"), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_getGlobalRulestack_MinimumSet_Gen.json +func ExampleFirewallsClient_GetGlobalRulestack_firewallsGetGlobalRulestackMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().GetGlobalRulestack(ctx, "firewall-rg", "firewall1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.GlobalRulestackInfo = armpanngfw.GlobalRulestackInfo{ + // AzureID: to.Ptr("aaaaaaaaaa"), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_getLogProfile_MaximumSet_Gen.json +func ExampleFirewallsClient_GetLogProfile_firewallsGetLogProfileMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().GetLogProfile(ctx, "firewall-rg", "firewall1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.LogSettings = armpanngfw.LogSettings{ + // ApplicationInsights: &armpanngfw.ApplicationInsights{ + // ID: to.Ptr("aaaaaaaaaaaaaaaa"), + // Key: to.Ptr("aaaaaaaaaaaaa"), + // }, + // CommonDestination: &armpanngfw.LogDestination{ + // EventHubConfigurations: &armpanngfw.EventHub{ + // Name: to.Ptr("aaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaa"), + // NameSpace: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // PolicyName: to.Ptr("aaaaaaaaaaaa"), + // SubscriptionID: to.Ptr("aaaaaaaaaa"), + // }, + // MonitorConfigurations: &armpanngfw.MonitorLog{ + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // PrimaryKey: to.Ptr("aaaaaaaaaaaaa"), + // SecondaryKey: to.Ptr("a"), + // SubscriptionID: to.Ptr("aaaaaaaaaaaaa"), + // Workspace: to.Ptr("aaaaaaaaaaa"), + // }, + // StorageConfigurations: &armpanngfw.StorageAccount{ + // AccountName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaa"), + // SubscriptionID: to.Ptr("aaaaaaaaa"), + // }, + // }, + // DecryptLogDestination: &armpanngfw.LogDestination{ + // EventHubConfigurations: &armpanngfw.EventHub{ + // Name: to.Ptr("aaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaa"), + // NameSpace: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // PolicyName: to.Ptr("aaaaaaaaaaaa"), + // SubscriptionID: to.Ptr("aaaaaaaaaa"), + // }, + // MonitorConfigurations: &armpanngfw.MonitorLog{ + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // PrimaryKey: to.Ptr("aaaaaaaaaaaaa"), + // SecondaryKey: to.Ptr("a"), + // SubscriptionID: to.Ptr("aaaaaaaaaaaaa"), + // Workspace: to.Ptr("aaaaaaaaaaa"), + // }, + // StorageConfigurations: &armpanngfw.StorageAccount{ + // AccountName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaa"), + // SubscriptionID: to.Ptr("aaaaaaaaa"), + // }, + // }, + // LogOption: to.Ptr(armpanngfw.LogOptionSAMEDESTINATION), + // LogType: to.Ptr(armpanngfw.LogTypeTRAFFIC), + // ThreatLogDestination: &armpanngfw.LogDestination{ + // EventHubConfigurations: &armpanngfw.EventHub{ + // Name: to.Ptr("aaaaaaaa"), + // ID: 
to.Ptr("aaaaaaaaaa"), + // NameSpace: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // PolicyName: to.Ptr("aaaaaaaaaaaa"), + // SubscriptionID: to.Ptr("aaaaaaaaaa"), + // }, + // MonitorConfigurations: &armpanngfw.MonitorLog{ + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // PrimaryKey: to.Ptr("aaaaaaaaaaaaa"), + // SecondaryKey: to.Ptr("a"), + // SubscriptionID: to.Ptr("aaaaaaaaaaaaa"), + // Workspace: to.Ptr("aaaaaaaaaaa"), + // }, + // StorageConfigurations: &armpanngfw.StorageAccount{ + // AccountName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaa"), + // SubscriptionID: to.Ptr("aaaaaaaaa"), + // }, + // }, + // TrafficLogDestination: &armpanngfw.LogDestination{ + // EventHubConfigurations: &armpanngfw.EventHub{ + // Name: to.Ptr("aaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaa"), + // NameSpace: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // PolicyName: to.Ptr("aaaaaaaaaaaa"), + // SubscriptionID: to.Ptr("aaaaaaaaaa"), + // }, + // MonitorConfigurations: &armpanngfw.MonitorLog{ + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // PrimaryKey: to.Ptr("aaaaaaaaaaaaa"), + // SecondaryKey: to.Ptr("a"), + // SubscriptionID: to.Ptr("aaaaaaaaaaaaa"), + // Workspace: to.Ptr("aaaaaaaaaaa"), + // }, + // StorageConfigurations: &armpanngfw.StorageAccount{ + // AccountName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaa"), + // SubscriptionID: to.Ptr("aaaaaaaaa"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_getLogProfile_MinimumSet_Gen.json +func ExampleFirewallsClient_GetLogProfile_firewallsGetLogProfileMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := 
armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().GetLogProfile(ctx, "firewall-rg", "firewall1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.LogSettings = armpanngfw.LogSettings{ + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_getSupportInfo_MaximumSet_Gen.json +func ExampleFirewallsClient_GetSupportInfo_firewallsGetSupportInfoMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().GetSupportInfo(ctx, "rgopenapi", "firewall1", &armpanngfw.FirewallsClientGetSupportInfoOptions{Email: to.Ptr("user1@domain.com")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.SupportInfo = armpanngfw.SupportInfo{ + // AccountID: to.Ptr("3cg5b439-294d-4c25-b0b2-ef649e0g6d38"), + // AccountRegistered: to.Ptr(armpanngfw.BooleanEnumTRUE), + // FreeTrial: to.Ptr(armpanngfw.BooleanEnumTRUE), + // FreeTrialCreditLeft: to.Ptr[int32](10), + // FreeTrialDaysLeft: to.Ptr[int32](1), + // HelpURL: to.Ptr("https://ssopreview.paloaltonetworks.com/home/bookmark/0oa4ao61shG4rd3Ub1d7/2557"), + // ProductSerial: to.Ptr("e22715cb-7e4e-4814-ad4f-ccd1417755d7"), + // ProductSKU: to.Ptr("62f63e3c-bc5a-4d68-a8a1-fcba9f526c90"), + // RegisterURL: to.Ptr("https://ssopreview.paloaltonetworks.com/home/bookmark/0oa4ao61shG4rd3Ub1d7/2557"), + // SupportURL: to.Ptr("https://ssopreview.paloaltonetworks.com/home/bookmark/0oa4ao61shG4rd3Ub1d7/2557"), + // UserDomainSupported: to.Ptr(armpanngfw.BooleanEnumTRUE), + // UserRegistered: to.Ptr(armpanngfw.BooleanEnumTRUE), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_getSupportInfo_MinimumSet_Gen.json +func ExampleFirewallsClient_GetSupportInfo_firewallsGetSupportInfoMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallsClient().GetSupportInfo(ctx, "rgopenapi", "firewall1", &armpanngfw.FirewallsClientGetSupportInfoOptions{Email: nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. 
+ _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.SupportInfo = armpanngfw.SupportInfo{ + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_saveLogProfile_MaximumSet_Gen.json +func ExampleFirewallsClient_SaveLogProfile_firewallsSaveLogProfileMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewFirewallsClient().SaveLogProfile(ctx, "firewall-rg", "firewall1", &armpanngfw.FirewallsClientSaveLogProfileOptions{LogSettings: &armpanngfw.LogSettings{ + ApplicationInsights: &armpanngfw.ApplicationInsights{ + ID: to.Ptr("aaaaaaaaaaaaaaaa"), + Key: to.Ptr("aaaaaaaaaaaaa"), + }, + CommonDestination: &armpanngfw.LogDestination{ + EventHubConfigurations: &armpanngfw.EventHub{ + Name: to.Ptr("aaaaaaaa"), + ID: to.Ptr("aaaaaaaaaa"), + NameSpace: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + PolicyName: to.Ptr("aaaaaaaaaaaa"), + SubscriptionID: to.Ptr("aaaaaaaaaa"), + }, + MonitorConfigurations: &armpanngfw.MonitorLog{ + ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + PrimaryKey: to.Ptr("aaaaaaaaaaaaa"), + SecondaryKey: to.Ptr("a"), + SubscriptionID: to.Ptr("aaaaaaaaaaaaa"), + Workspace: to.Ptr("aaaaaaaaaaa"), + }, + StorageConfigurations: &armpanngfw.StorageAccount{ + AccountName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + ID: to.Ptr("aaaaaaaaaaaaaaa"), + SubscriptionID: to.Ptr("aaaaaaaaa"), + }, + }, + 
DecryptLogDestination: &armpanngfw.LogDestination{ + EventHubConfigurations: &armpanngfw.EventHub{ + Name: to.Ptr("aaaaaaaa"), + ID: to.Ptr("aaaaaaaaaa"), + NameSpace: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + PolicyName: to.Ptr("aaaaaaaaaaaa"), + SubscriptionID: to.Ptr("aaaaaaaaaa"), + }, + MonitorConfigurations: &armpanngfw.MonitorLog{ + ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + PrimaryKey: to.Ptr("aaaaaaaaaaaaa"), + SecondaryKey: to.Ptr("a"), + SubscriptionID: to.Ptr("aaaaaaaaaaaaa"), + Workspace: to.Ptr("aaaaaaaaaaa"), + }, + StorageConfigurations: &armpanngfw.StorageAccount{ + AccountName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + ID: to.Ptr("aaaaaaaaaaaaaaa"), + SubscriptionID: to.Ptr("aaaaaaaaa"), + }, + }, + LogOption: to.Ptr(armpanngfw.LogOptionSAMEDESTINATION), + LogType: to.Ptr(armpanngfw.LogTypeTRAFFIC), + ThreatLogDestination: &armpanngfw.LogDestination{ + EventHubConfigurations: &armpanngfw.EventHub{ + Name: to.Ptr("aaaaaaaa"), + ID: to.Ptr("aaaaaaaaaa"), + NameSpace: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + PolicyName: to.Ptr("aaaaaaaaaaaa"), + SubscriptionID: to.Ptr("aaaaaaaaaa"), + }, + MonitorConfigurations: &armpanngfw.MonitorLog{ + ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + PrimaryKey: to.Ptr("aaaaaaaaaaaaa"), + SecondaryKey: to.Ptr("a"), + SubscriptionID: to.Ptr("aaaaaaaaaaaaa"), + Workspace: to.Ptr("aaaaaaaaaaa"), + }, + StorageConfigurations: &armpanngfw.StorageAccount{ + AccountName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + ID: to.Ptr("aaaaaaaaaaaaaaa"), + SubscriptionID: to.Ptr("aaaaaaaaa"), + }, + }, + TrafficLogDestination: &armpanngfw.LogDestination{ + EventHubConfigurations: &armpanngfw.EventHub{ + Name: to.Ptr("aaaaaaaa"), + ID: to.Ptr("aaaaaaaaaa"), + NameSpace: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + PolicyName: to.Ptr("aaaaaaaaaaaa"), + SubscriptionID: to.Ptr("aaaaaaaaaa"), + }, + MonitorConfigurations: &armpanngfw.MonitorLog{ + ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + PrimaryKey: to.Ptr("aaaaaaaaaaaaa"), + SecondaryKey: to.Ptr("a"), + SubscriptionID: 
to.Ptr("aaaaaaaaaaaaa"), + Workspace: to.Ptr("aaaaaaaaaaa"), + }, + StorageConfigurations: &armpanngfw.StorageAccount{ + AccountName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + ID: to.Ptr("aaaaaaaaaaaaaaa"), + SubscriptionID: to.Ptr("aaaaaaaaa"), + }, + }, + }, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Firewalls_saveLogProfile_MinimumSet_Gen.json +func ExampleFirewallsClient_SaveLogProfile_firewallsSaveLogProfileMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewFirewallsClient().SaveLogProfile(ctx, "firewall-rg", "firewall1", &armpanngfw.FirewallsClientSaveLogProfileOptions{LogSettings: nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewallstatus_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewallstatus_client.go new file mode 100644 index 000000000000..a0ec2d96d03f --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewallstatus_client.go @@ -0,0 +1,172 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// FirewallStatusClient contains the methods for the FirewallStatus group. +// Don't use this type directly, use NewFirewallStatusClient() instead. +type FirewallStatusClient struct { + internal *arm.Client + subscriptionID string +} + +// NewFirewallStatusClient creates a new instance of FirewallStatusClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewFirewallStatusClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FirewallStatusClient, error) { + cl, err := arm.NewClient(moduleName+".FirewallStatusClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &FirewallStatusClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// Get - Get a FirewallStatusResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - firewallName - Firewall resource name +// - options - FirewallStatusClientGetOptions contains the optional parameters for the FirewallStatusClient.Get method. 
+func (client *FirewallStatusClient) Get(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallStatusClientGetOptions) (FirewallStatusClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, firewallName, options) + if err != nil { + return FirewallStatusClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallStatusClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FirewallStatusClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *FirewallStatusClient) getCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallStatusClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}/statuses/default" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} 
+ +// getHandleResponse handles the Get response. +func (client *FirewallStatusClient) getHandleResponse(resp *http.Response) (FirewallStatusClientGetResponse, error) { + result := FirewallStatusClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FirewallStatusResource); err != nil { + return FirewallStatusClientGetResponse{}, err + } + return result, nil +} + +// NewListByFirewallsPager - List FirewallStatusResource resources by Firewalls +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - firewallName - Firewall resource name +// - options - FirewallStatusClientListByFirewallsOptions contains the optional parameters for the FirewallStatusClient.NewListByFirewallsPager +// method. +func (client *FirewallStatusClient) NewListByFirewallsPager(resourceGroupName string, firewallName string, options *FirewallStatusClientListByFirewallsOptions) *runtime.Pager[FirewallStatusClientListByFirewallsResponse] { + return runtime.NewPager(runtime.PagingHandler[FirewallStatusClientListByFirewallsResponse]{ + More: func(page FirewallStatusClientListByFirewallsResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FirewallStatusClientListByFirewallsResponse) (FirewallStatusClientListByFirewallsResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByFirewallsCreateRequest(ctx, resourceGroupName, firewallName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FirewallStatusClientListByFirewallsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FirewallStatusClientListByFirewallsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FirewallStatusClientListByFirewallsResponse{}, runtime.NewResponseError(resp) + } + 
return client.listByFirewallsHandleResponse(resp) + }, + }) +} + +// listByFirewallsCreateRequest creates the ListByFirewalls request. +func (client *FirewallStatusClient) listByFirewallsCreateRequest(ctx context.Context, resourceGroupName string, firewallName string, options *FirewallStatusClientListByFirewallsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/firewalls/{firewallName}/statuses" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if firewallName == "" { + return nil, errors.New("parameter firewallName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{firewallName}", url.PathEscape(firewallName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByFirewallsHandleResponse handles the ListByFirewalls response. 
+func (client *FirewallStatusClient) listByFirewallsHandleResponse(resp *http.Response) (FirewallStatusClientListByFirewallsResponse, error) { + result := FirewallStatusClientListByFirewallsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FirewallStatusResourceListResult); err != nil { + return FirewallStatusClientListByFirewallsResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewallstatus_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewallstatus_client_example_test.go new file mode 100644 index 000000000000..160373e157f3 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/firewallstatus_client_example_test.go @@ -0,0 +1,170 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FirewallStatus_ListByFirewalls_MaximumSet_Gen.json +func ExampleFirewallStatusClient_NewListByFirewallsPager_firewallStatusListByFirewallsMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewFirewallStatusClient().NewListByFirewallsPager("rgopenapi", "firewall1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.FirewallStatusResourceListResult = armpanngfw.FirewallStatusResourceListResult{ + // Value: []*armpanngfw.FirewallStatusResource{ + // { + // Name: to.Ptr("default"), + // Type: to.Ptr("aaaaaaa"), + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/firewalls/firewall1/statuses/default"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.FirewallStatusProperty{ + // HealthReason: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // HealthStatus: to.Ptr(armpanngfw.HealthStatusGREEN), + // IsPanoramaManaged: to.Ptr(armpanngfw.BooleanEnumTRUE), + // PanoramaStatus: &armpanngfw.PanoramaStatus{ + // PanoramaServer2Status: to.Ptr(armpanngfw.ServerStatusUP), + // PanoramaServerStatus: to.Ptr(armpanngfw.ServerStatusUP), + // }, + // ProvisioningState: to.Ptr(armpanngfw.ReadOnlyProvisioningStateSucceeded), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FirewallStatus_ListByFirewalls_MinimumSet_Gen.json +func ExampleFirewallStatusClient_NewListByFirewallsPager_firewallStatusListByFirewallsMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := 
armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewFirewallStatusClient().NewListByFirewallsPager("rgopenapi", "firewall1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // page.FirewallStatusResourceListResult = armpanngfw.FirewallStatusResourceListResult{ + // Value: []*armpanngfw.FirewallStatusResource{ + // { + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/firewalls/firewall1/statuses/default"), + // Properties: &armpanngfw.FirewallStatusProperty{ + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FirewallStatus_Get_MaximumSet_Gen.json +func ExampleFirewallStatusClient_Get_firewallStatusGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallStatusClient().Get(ctx, "rgopenapi", "firewall1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. 
We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FirewallStatusResource = armpanngfw.FirewallStatusResource{ + // Name: to.Ptr("default"), + // Type: to.Ptr("aaaa"), + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/firewalls/firewall1/statuses/default"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.FirewallStatusProperty{ + // HealthReason: to.Ptr("aaaaaaaaaaaa"), + // HealthStatus: to.Ptr(armpanngfw.HealthStatusGREEN), + // IsPanoramaManaged: to.Ptr(armpanngfw.BooleanEnumTRUE), + // PanoramaStatus: &armpanngfw.PanoramaStatus{ + // PanoramaServer2Status: to.Ptr(armpanngfw.ServerStatusUP), + // PanoramaServerStatus: to.Ptr(armpanngfw.ServerStatusUP), + // }, + // ProvisioningState: to.Ptr(armpanngfw.ReadOnlyProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FirewallStatus_Get_MinimumSet_Gen.json +func ExampleFirewallStatusClient_Get_firewallStatusGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + 
log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFirewallStatusClient().Get(ctx, "rgopenapi", "firewall1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FirewallStatusResource = armpanngfw.FirewallStatusResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/firewalls/firewall1/statuses/default"), + // Properties: &armpanngfw.FirewallStatusProperty{ + // }, + // } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistglobalrulestack_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistglobalrulestack_client.go new file mode 100644 index 000000000000..f9fb5d648650 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistglobalrulestack_client.go @@ -0,0 +1,284 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// FqdnListGlobalRulestackClient contains the methods for the FqdnListGlobalRulestack group. +// Don't use this type directly, use NewFqdnListGlobalRulestackClient() instead. +type FqdnListGlobalRulestackClient struct { + internal *arm.Client +} + +// NewFqdnListGlobalRulestackClient creates a new instance of FqdnListGlobalRulestackClient with the specified values. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewFqdnListGlobalRulestackClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*FqdnListGlobalRulestackClient, error) { + cl, err := arm.NewClient(moduleName+".FqdnListGlobalRulestackClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &FqdnListGlobalRulestackClient{ + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a FqdnListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - name - fqdn list name +// - resource - Resource create parameters. +// - options - FqdnListGlobalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the FqdnListGlobalRulestackClient.BeginCreateOrUpdate +// method. 
+func (client *FqdnListGlobalRulestackClient) BeginCreateOrUpdate(ctx context.Context, globalRulestackName string, name string, resource FqdnListGlobalRulestackResource, options *FqdnListGlobalRulestackClientBeginCreateOrUpdateOptions) (*runtime.Poller[FqdnListGlobalRulestackClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, globalRulestackName, name, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FqdnListGlobalRulestackClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[FqdnListGlobalRulestackClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a FqdnListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *FqdnListGlobalRulestackClient) createOrUpdate(ctx context.Context, globalRulestackName string, name string, resource FqdnListGlobalRulestackResource, options *FqdnListGlobalRulestackClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, globalRulestackName, name, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *FqdnListGlobalRulestackClient) createOrUpdateCreateRequest(ctx context.Context, globalRulestackName string, name string, resource FqdnListGlobalRulestackResource, options *FqdnListGlobalRulestackClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/fqdnlists/{name}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a FqdnListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - name - fqdn list name +// - options - FqdnListGlobalRulestackClientBeginDeleteOptions contains the optional parameters for the FqdnListGlobalRulestackClient.BeginDelete +// method. 
+func (client *FqdnListGlobalRulestackClient) BeginDelete(ctx context.Context, globalRulestackName string, name string, options *FqdnListGlobalRulestackClientBeginDeleteOptions) (*runtime.Poller[FqdnListGlobalRulestackClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, globalRulestackName, name, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FqdnListGlobalRulestackClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[FqdnListGlobalRulestackClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a FqdnListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *FqdnListGlobalRulestackClient) deleteOperation(ctx context.Context, globalRulestackName string, name string, options *FqdnListGlobalRulestackClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, globalRulestackName, name, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *FqdnListGlobalRulestackClient) deleteCreateRequest(ctx context.Context, globalRulestackName string, name string, options *FqdnListGlobalRulestackClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/fqdnlists/{name}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a FqdnListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - name - fqdn list name +// - options - FqdnListGlobalRulestackClientGetOptions contains the optional parameters for the FqdnListGlobalRulestackClient.Get +// method. 
+func (client *FqdnListGlobalRulestackClient) Get(ctx context.Context, globalRulestackName string, name string, options *FqdnListGlobalRulestackClientGetOptions) (FqdnListGlobalRulestackClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, globalRulestackName, name, options) + if err != nil { + return FqdnListGlobalRulestackClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FqdnListGlobalRulestackClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FqdnListGlobalRulestackClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *FqdnListGlobalRulestackClient) getCreateRequest(ctx context.Context, globalRulestackName string, name string, options *FqdnListGlobalRulestackClientGetOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/fqdnlists/{name}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
+func (client *FqdnListGlobalRulestackClient) getHandleResponse(resp *http.Response) (FqdnListGlobalRulestackClientGetResponse, error) { + result := FqdnListGlobalRulestackClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FqdnListGlobalRulestackResource); err != nil { + return FqdnListGlobalRulestackClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List FqdnListGlobalRulestackResource resources by Tenant +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - FqdnListGlobalRulestackClientListOptions contains the optional parameters for the FqdnListGlobalRulestackClient.NewListPager +// method. +func (client *FqdnListGlobalRulestackClient) NewListPager(globalRulestackName string, options *FqdnListGlobalRulestackClientListOptions) *runtime.Pager[FqdnListGlobalRulestackClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[FqdnListGlobalRulestackClientListResponse]{ + More: func(page FqdnListGlobalRulestackClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FqdnListGlobalRulestackClientListResponse) (FqdnListGlobalRulestackClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, globalRulestackName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FqdnListGlobalRulestackClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FqdnListGlobalRulestackClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FqdnListGlobalRulestackClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
+func (client *FqdnListGlobalRulestackClient) listCreateRequest(ctx context.Context, globalRulestackName string, options *FqdnListGlobalRulestackClientListOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/fqdnlists" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *FqdnListGlobalRulestackClient) listHandleResponse(resp *http.Response) (FqdnListGlobalRulestackClientListResponse, error) { + result := FqdnListGlobalRulestackClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FqdnListGlobalRulestackResourceListResult); err != nil { + return FqdnListGlobalRulestackClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistglobalrulestack_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistglobalrulestack_client_example_test.go new file mode 100644 index 000000000000..dc9f95dc39a1 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistglobalrulestack_client_example_test.go @@ -0,0 +1,311 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListGlobalRulestack_List_MaximumSet_Gen.json +func ExampleFqdnListGlobalRulestackClient_NewListPager_fqdnListGlobalRulestackListMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewFqdnListGlobalRulestackClient().NewListPager("praval", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.FqdnListGlobalRulestackResourceListResult = armpanngfw.FqdnListGlobalRulestackResourceListResult{ + // Value: []*armpanngfw.FqdnListGlobalRulestackResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.FqdnObject{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // AuditComment: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListGlobalRulestack_List_MinimumSet_Gen.json +func ExampleFqdnListGlobalRulestackClient_NewListPager_fqdnListGlobalRulestackListMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewFqdnListGlobalRulestackClient().NewListPager("praval", nil) + for pager.More() { + page, err := 
pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // page.FqdnListGlobalRulestackResourceListResult = armpanngfw.FqdnListGlobalRulestackResourceListResult{ + // Value: []*armpanngfw.FqdnListGlobalRulestackResource{ + // { + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval/fqdnlists/fqdnlists1"), + // Properties: &armpanngfw.FqdnObject{ + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListGlobalRulestack_Get_MaximumSet_Gen.json +func ExampleFqdnListGlobalRulestackClient_Get_fqdnListGlobalRulestackGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFqdnListGlobalRulestackClient().Get(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FqdnListGlobalRulestackResource = armpanngfw.FqdnListGlobalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.FqdnObject{ + // Description: to.Ptr("string"), + // AuditComment: to.Ptr("string"), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListGlobalRulestack_Get_MinimumSet_Gen.json +func ExampleFqdnListGlobalRulestackClient_Get_fqdnListGlobalRulestackGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFqdnListGlobalRulestackClient().Get(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish 
the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FqdnListGlobalRulestackResource = armpanngfw.FqdnListGlobalRulestackResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval/fqdnlists/armid1"), + // Properties: &armpanngfw.FqdnObject{ + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListGlobalRulestack_CreateOrUpdate_MaximumSet_Gen.json +func ExampleFqdnListGlobalRulestackClient_BeginCreateOrUpdate_fqdnListGlobalRulestackCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFqdnListGlobalRulestackClient().BeginCreateOrUpdate(ctx, "praval", "armid1", armpanngfw.FqdnListGlobalRulestackResource{ + Properties: &armpanngfw.FqdnObject{ + Description: to.Ptr("string"), + AuditComment: to.Ptr("string"), + Etag: to.Ptr("aaaaaaaaaaaaaaaaaa"), + FqdnList: []*string{ + to.Ptr("string1"), + to.Ptr("string2")}, + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull 
the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FqdnListGlobalRulestackResource = armpanngfw.FqdnListGlobalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.FqdnObject{ + // Description: to.Ptr("string"), + // AuditComment: to.Ptr("string"), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListGlobalRulestack_CreateOrUpdate_MinimumSet_Gen.json +func ExampleFqdnListGlobalRulestackClient_BeginCreateOrUpdate_fqdnListGlobalRulestackCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + 
clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFqdnListGlobalRulestackClient().BeginCreateOrUpdate(ctx, "praval", "armid1", armpanngfw.FqdnListGlobalRulestackResource{ + Properties: &armpanngfw.FqdnObject{ + FqdnList: []*string{ + to.Ptr("string1"), + to.Ptr("string2")}, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FqdnListGlobalRulestackResource = armpanngfw.FqdnListGlobalRulestackResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval/fqdnlists/armid1"), + // Properties: &armpanngfw.FqdnObject{ + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListGlobalRulestack_Delete_MaximumSet_Gen.json +func ExampleFqdnListGlobalRulestackClient_BeginDelete_fqdnListGlobalRulestackDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := 
clientFactory.NewFqdnListGlobalRulestackClient().BeginDelete(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListGlobalRulestack_Delete_MinimumSet_Gen.json +func ExampleFqdnListGlobalRulestackClient_BeginDelete_fqdnListGlobalRulestackDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFqdnListGlobalRulestackClient().BeginDelete(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistlocalrulestack_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistlocalrulestack_client.go new file mode 100644 index 000000000000..bdd9267c4ea0 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistlocalrulestack_client.go @@ -0,0 +1,323 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// FqdnListLocalRulestackClient contains the methods for the FqdnListLocalRulestack group. +// Don't use this type directly, use NewFqdnListLocalRulestackClient() instead. +type FqdnListLocalRulestackClient struct { + internal *arm.Client + subscriptionID string +} + +// NewFqdnListLocalRulestackClient creates a new instance of FqdnListLocalRulestackClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewFqdnListLocalRulestackClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*FqdnListLocalRulestackClient, error) { + cl, err := arm.NewClient(moduleName+".FqdnListLocalRulestackClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &FqdnListLocalRulestackClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a FqdnListLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - name - fqdn list name +// - resource - Resource create parameters. 
+// - options - FqdnListLocalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the FqdnListLocalRulestackClient.BeginCreateOrUpdate +// method. +func (client *FqdnListLocalRulestackClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, name string, resource FqdnListLocalRulestackResource, options *FqdnListLocalRulestackClientBeginCreateOrUpdateOptions) (*runtime.Poller[FqdnListLocalRulestackClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, localRulestackName, name, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FqdnListLocalRulestackClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[FqdnListLocalRulestackClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a FqdnListLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *FqdnListLocalRulestackClient) createOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, name string, resource FqdnListLocalRulestackResource, options *FqdnListLocalRulestackClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, localRulestackName, name, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *FqdnListLocalRulestackClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, name string, resource FqdnListLocalRulestackResource, options *FqdnListLocalRulestackClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/fqdnlists/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a FqdnListLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - localRulestackName - LocalRulestack resource name +// - name - fqdn list name +// - options - FqdnListLocalRulestackClientBeginDeleteOptions contains the optional parameters for the FqdnListLocalRulestackClient.BeginDelete +// method. +func (client *FqdnListLocalRulestackClient) BeginDelete(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *FqdnListLocalRulestackClientBeginDeleteOptions) (*runtime.Poller[FqdnListLocalRulestackClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, localRulestackName, name, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[FqdnListLocalRulestackClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[FqdnListLocalRulestackClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a FqdnListLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *FqdnListLocalRulestackClient) deleteOperation(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *FqdnListLocalRulestackClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, localRulestackName, name, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *FqdnListLocalRulestackClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *FqdnListLocalRulestackClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/fqdnlists/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a FqdnListLocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - localRulestackName - LocalRulestack resource name +// - name - fqdn list name +// - options - FqdnListLocalRulestackClientGetOptions contains the optional parameters for the FqdnListLocalRulestackClient.Get +// method. +func (client *FqdnListLocalRulestackClient) Get(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *FqdnListLocalRulestackClientGetOptions) (FqdnListLocalRulestackClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, localRulestackName, name, options) + if err != nil { + return FqdnListLocalRulestackClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FqdnListLocalRulestackClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FqdnListLocalRulestackClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *FqdnListLocalRulestackClient) getCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *FqdnListLocalRulestackClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/fqdnlists/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", 
url.PathEscape(localRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *FqdnListLocalRulestackClient) getHandleResponse(resp *http.Response) (FqdnListLocalRulestackClientGetResponse, error) { + result := FqdnListLocalRulestackClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FqdnListLocalRulestackResource); err != nil { + return FqdnListLocalRulestackClientGetResponse{}, err + } + return result, nil +} + +// NewListByLocalRulestacksPager - List FqdnListLocalRulestackResource resources by LocalRulestacks +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - FqdnListLocalRulestackClientListByLocalRulestacksOptions contains the optional parameters for the FqdnListLocalRulestackClient.NewListByLocalRulestacksPager +// method. 
+func (client *FqdnListLocalRulestackClient) NewListByLocalRulestacksPager(resourceGroupName string, localRulestackName string, options *FqdnListLocalRulestackClientListByLocalRulestacksOptions) *runtime.Pager[FqdnListLocalRulestackClientListByLocalRulestacksResponse] { + return runtime.NewPager(runtime.PagingHandler[FqdnListLocalRulestackClientListByLocalRulestacksResponse]{ + More: func(page FqdnListLocalRulestackClientListByLocalRulestacksResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *FqdnListLocalRulestackClientListByLocalRulestacksResponse) (FqdnListLocalRulestackClientListByLocalRulestacksResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByLocalRulestacksCreateRequest(ctx, resourceGroupName, localRulestackName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return FqdnListLocalRulestackClientListByLocalRulestacksResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return FqdnListLocalRulestackClientListByLocalRulestacksResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FqdnListLocalRulestackClientListByLocalRulestacksResponse{}, runtime.NewResponseError(resp) + } + return client.listByLocalRulestacksHandleResponse(resp) + }, + }) +} + +// listByLocalRulestacksCreateRequest creates the ListByLocalRulestacks request. 
+func (client *FqdnListLocalRulestackClient) listByLocalRulestacksCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *FqdnListLocalRulestackClientListByLocalRulestacksOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/fqdnlists" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByLocalRulestacksHandleResponse handles the ListByLocalRulestacks response. 
+func (client *FqdnListLocalRulestackClient) listByLocalRulestacksHandleResponse(resp *http.Response) (FqdnListLocalRulestackClientListByLocalRulestacksResponse, error) { + result := FqdnListLocalRulestackClientListByLocalRulestacksResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.FqdnListLocalRulestackResourceListResult); err != nil { + return FqdnListLocalRulestackClientListByLocalRulestacksResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistlocalrulestack_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistlocalrulestack_client_example_test.go new file mode 100644 index 000000000000..593aa6d5568e --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/fqdnlistlocalrulestack_client_example_test.go @@ -0,0 +1,311 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListLocalRulestack_ListByLocalRulestacks_MaximumSet_Gen.json +func ExampleFqdnListLocalRulestackClient_NewListByLocalRulestacksPager_fqdnListLocalRulestackListByLocalRulestacksMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewFqdnListLocalRulestackClient().NewListByLocalRulestacksPager("rgopenapi", "lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.FqdnListLocalRulestackResourceListResult = armpanngfw.FqdnListLocalRulestackResourceListResult{ + // Value: []*armpanngfw.FqdnListLocalRulestackResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.FqdnObject{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // AuditComment: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListLocalRulestack_ListByLocalRulestacks_MinimumSet_Gen.json +func ExampleFqdnListLocalRulestackClient_NewListByLocalRulestacksPager_fqdnListLocalRulestackListByLocalRulestacksMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := 
clientFactory.NewFqdnListLocalRulestackClient().NewListByLocalRulestacksPager("rgopenapi", "lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // page.FqdnListLocalRulestackResourceListResult = armpanngfw.FqdnListLocalRulestackResourceListResult{ + // Value: []*armpanngfw.FqdnListLocalRulestackResource{ + // { + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval/fqdnlists/fqdnlists1"), + // Properties: &armpanngfw.FqdnObject{ + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListLocalRulestack_Get_MaximumSet_Gen.json +func ExampleFqdnListLocalRulestackClient_Get_fqdnListLocalRulestackGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewFqdnListLocalRulestackClient().Get(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. 
We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FqdnListLocalRulestackResource = armpanngfw.FqdnListLocalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.FqdnObject{ + // Description: to.Ptr("string"), + // AuditComment: to.Ptr("string"), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListLocalRulestack_Get_MinimumSet_Gen.json +func ExampleFqdnListLocalRulestackClient_Get_fqdnListLocalRulestackGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to 
create client: %v", err) + } + res, err := clientFactory.NewFqdnListLocalRulestackClient().Get(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FqdnListLocalRulestackResource = armpanngfw.FqdnListLocalRulestackResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval/fqdnlists/armid1"), + // Properties: &armpanngfw.FqdnObject{ + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListLocalRulestack_CreateOrUpdate_MaximumSet_Gen.json +func ExampleFqdnListLocalRulestackClient_BeginCreateOrUpdate_fqdnListLocalRulestackCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFqdnListLocalRulestackClient().BeginCreateOrUpdate(ctx, "rgopenapi", "lrs1", "armid1", armpanngfw.FqdnListLocalRulestackResource{ + Properties: &armpanngfw.FqdnObject{ + Description: to.Ptr("string"), + AuditComment: to.Ptr("string"), + Etag: to.Ptr("aaaaaaaaaaaaaaaaaa"), + FqdnList: []*string{ + to.Ptr("string1"), + 
to.Ptr("string2")}, + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.FqdnListLocalRulestackResource = armpanngfw.FqdnListLocalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.FqdnObject{ + // Description: to.Ptr("string"), + // AuditComment: to.Ptr("string"), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListLocalRulestack_CreateOrUpdate_MinimumSet_Gen.json +func 
ExampleFqdnListLocalRulestackClient_BeginCreateOrUpdate_fqdnListLocalRulestackCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFqdnListLocalRulestackClient().BeginCreateOrUpdate(ctx, "rgopenapi", "lrs1", "armid1", armpanngfw.FqdnListLocalRulestackResource{ + Properties: &armpanngfw.FqdnObject{ + FqdnList: []*string{ + to.Ptr("string1"), + to.Ptr("string2")}, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.FqdnListLocalRulestackResource = armpanngfw.FqdnListLocalRulestackResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval/fqdnlists/armid1"), + // Properties: &armpanngfw.FqdnObject{ + // FqdnList: []*string{ + // to.Ptr("string1"), + // to.Ptr("string2")}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListLocalRulestack_Delete_MaximumSet_Gen.json +func ExampleFqdnListLocalRulestackClient_BeginDelete_fqdnListLocalRulestackDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFqdnListLocalRulestackClient().BeginDelete(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/FqdnListLocalRulestack_Delete_MinimumSet_Gen.json +func ExampleFqdnListLocalRulestackClient_BeginDelete_fqdnListLocalRulestackDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := 
armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewFqdnListLocalRulestackClient().BeginDelete(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/globalrulestack_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/globalrulestack_client.go new file mode 100644 index 000000000000..37bb85ed24b0 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/globalrulestack_client.go @@ -0,0 +1,791 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// GlobalRulestackClient contains the methods for the GlobalRulestack group. +// Don't use this type directly, use NewGlobalRulestackClient() instead. +type GlobalRulestackClient struct { + internal *arm.Client +} + +// NewGlobalRulestackClient creates a new instance of GlobalRulestackClient with the specified values. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. 
+func NewGlobalRulestackClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*GlobalRulestackClient, error) { + cl, err := arm.NewClient(moduleName+".GlobalRulestackClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &GlobalRulestackClient{ + internal: cl, + } + return client, nil +} + +// BeginCommit - Commit rulestack configuration +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientBeginCommitOptions contains the optional parameters for the GlobalRulestackClient.BeginCommit +// method. +func (client *GlobalRulestackClient) BeginCommit(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientBeginCommitOptions) (*runtime.Poller[GlobalRulestackClientCommitResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.commit(ctx, globalRulestackName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[GlobalRulestackClientCommitResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[GlobalRulestackClientCommitResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Commit - Commit rulestack configuration +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +func (client *GlobalRulestackClient) commit(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientBeginCommitOptions) (*http.Response, error) { + req, err := client.commitCreateRequest(ctx, globalRulestackName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// commitCreateRequest creates the Commit request. +func (client *GlobalRulestackClient) commitCreateRequest(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientBeginCommitOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/commit" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginCreateOrUpdate - Create a GlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - resource - Resource create parameters. +// - options - GlobalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the GlobalRulestackClient.BeginCreateOrUpdate +// method. 
+func (client *GlobalRulestackClient) BeginCreateOrUpdate(ctx context.Context, globalRulestackName string, resource GlobalRulestackResource, options *GlobalRulestackClientBeginCreateOrUpdateOptions) (*runtime.Poller[GlobalRulestackClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, globalRulestackName, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[GlobalRulestackClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[GlobalRulestackClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a GlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *GlobalRulestackClient) createOrUpdate(ctx context.Context, globalRulestackName string, resource GlobalRulestackResource, options *GlobalRulestackClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, globalRulestackName, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *GlobalRulestackClient) createOrUpdateCreateRequest(ctx context.Context, globalRulestackName string, resource GlobalRulestackResource, options *GlobalRulestackClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a GlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientBeginDeleteOptions contains the optional parameters for the GlobalRulestackClient.BeginDelete +// method. 
+func (client *GlobalRulestackClient) BeginDelete(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientBeginDeleteOptions) (*runtime.Poller[GlobalRulestackClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, globalRulestackName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[GlobalRulestackClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[GlobalRulestackClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a GlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *GlobalRulestackClient) deleteOperation(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, globalRulestackName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *GlobalRulestackClient) deleteCreateRequest(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a GlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientGetOptions contains the optional parameters for the GlobalRulestackClient.Get method. +func (client *GlobalRulestackClient) Get(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientGetOptions) (GlobalRulestackClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, globalRulestackName, options) + if err != nil { + return GlobalRulestackClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *GlobalRulestackClient) getCreateRequest(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientGetOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *GlobalRulestackClient) getHandleResponse(resp *http.Response) (GlobalRulestackClientGetResponse, error) { + result := GlobalRulestackClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GlobalRulestackResource); err != nil { + return GlobalRulestackClientGetResponse{}, err + } + return result, nil +} + +// GetChangeLog - Get changelog +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientGetChangeLogOptions contains the optional parameters for the GlobalRulestackClient.GetChangeLog +// method. 
+func (client *GlobalRulestackClient) GetChangeLog(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientGetChangeLogOptions) (GlobalRulestackClientGetChangeLogResponse, error) { + req, err := client.getChangeLogCreateRequest(ctx, globalRulestackName, options) + if err != nil { + return GlobalRulestackClientGetChangeLogResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientGetChangeLogResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientGetChangeLogResponse{}, runtime.NewResponseError(resp) + } + return client.getChangeLogHandleResponse(resp) +} + +// getChangeLogCreateRequest creates the GetChangeLog request. +func (client *GlobalRulestackClient) getChangeLogCreateRequest(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientGetChangeLogOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/getChangeLog" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getChangeLogHandleResponse handles the GetChangeLog response. 
+func (client *GlobalRulestackClient) getChangeLogHandleResponse(resp *http.Response) (GlobalRulestackClientGetChangeLogResponse, error) { + result := GlobalRulestackClientGetChangeLogResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Changelog); err != nil { + return GlobalRulestackClientGetChangeLogResponse{}, err + } + return result, nil +} + +// NewListPager - List GlobalRulestackResource resources by Tenant +// +// Generated from API version 2022-08-29-preview +// - options - GlobalRulestackClientListOptions contains the optional parameters for the GlobalRulestackClient.NewListPager +// method. +func (client *GlobalRulestackClient) NewListPager(options *GlobalRulestackClientListOptions) *runtime.Pager[GlobalRulestackClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[GlobalRulestackClientListResponse]{ + More: func(page GlobalRulestackClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *GlobalRulestackClientListResponse) (GlobalRulestackClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return GlobalRulestackClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
+func (client *GlobalRulestackClient) listCreateRequest(ctx context.Context, options *GlobalRulestackClientListOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *GlobalRulestackClient) listHandleResponse(resp *http.Response) (GlobalRulestackClientListResponse, error) { + result := GlobalRulestackClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GlobalRulestackResourceListResult); err != nil { + return GlobalRulestackClientListResponse{}, err + } + return result, nil +} + +// ListAdvancedSecurityObjects - Get the list of advanced security objects +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientListAdvancedSecurityObjectsOptions contains the optional parameters for the GlobalRulestackClient.ListAdvancedSecurityObjects +// method. 
+func (client *GlobalRulestackClient) ListAdvancedSecurityObjects(ctx context.Context, globalRulestackName string, typeParam AdvSecurityObjectTypeEnum, options *GlobalRulestackClientListAdvancedSecurityObjectsOptions) (GlobalRulestackClientListAdvancedSecurityObjectsResponse, error) { + req, err := client.listAdvancedSecurityObjectsCreateRequest(ctx, globalRulestackName, typeParam, options) + if err != nil { + return GlobalRulestackClientListAdvancedSecurityObjectsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientListAdvancedSecurityObjectsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientListAdvancedSecurityObjectsResponse{}, runtime.NewResponseError(resp) + } + return client.listAdvancedSecurityObjectsHandleResponse(resp) +} + +// listAdvancedSecurityObjectsCreateRequest creates the ListAdvancedSecurityObjects request. +func (client *GlobalRulestackClient) listAdvancedSecurityObjectsCreateRequest(ctx context.Context, globalRulestackName string, typeParam AdvSecurityObjectTypeEnum, options *GlobalRulestackClientListAdvancedSecurityObjectsOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/listAdvancedSecurityObjects" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + reqQP.Set("type", 
string(typeParam)) + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAdvancedSecurityObjectsHandleResponse handles the ListAdvancedSecurityObjects response. +func (client *GlobalRulestackClient) listAdvancedSecurityObjectsHandleResponse(resp *http.Response) (GlobalRulestackClientListAdvancedSecurityObjectsResponse, error) { + result := GlobalRulestackClientListAdvancedSecurityObjectsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AdvSecurityObjectListResponse); err != nil { + return GlobalRulestackClientListAdvancedSecurityObjectsResponse{}, err + } + return result, nil +} + +// ListAppIDs - List of AppIds for GlobalRulestack ApiVersion +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientListAppIDsOptions contains the optional parameters for the GlobalRulestackClient.ListAppIDs +// method. +func (client *GlobalRulestackClient) ListAppIDs(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientListAppIDsOptions) (GlobalRulestackClientListAppIDsResponse, error) { + req, err := client.listAppIDsCreateRequest(ctx, globalRulestackName, options) + if err != nil { + return GlobalRulestackClientListAppIDsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientListAppIDsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientListAppIDsResponse{}, runtime.NewResponseError(resp) + } + return client.listAppIDsHandleResponse(resp) +} + +// listAppIDsCreateRequest creates the ListAppIDs request. 
+func (client *GlobalRulestackClient) listAppIDsCreateRequest(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientListAppIDsOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/listAppIds" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.AppIDVersion != nil { + reqQP.Set("appIdVersion", *options.AppIDVersion) + } + if options != nil && options.AppPrefix != nil { + reqQP.Set("appPrefix", *options.AppPrefix) + } + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAppIDsHandleResponse handles the ListAppIDs response. +func (client *GlobalRulestackClient) listAppIDsHandleResponse(resp *http.Response) (GlobalRulestackClientListAppIDsResponse, error) { + result := GlobalRulestackClientListAppIDsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ListAppIDResponse); err != nil { + return GlobalRulestackClientListAppIDsResponse{}, err + } + return result, nil +} + +// ListCountries - List of countries for Rulestack +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientListCountriesOptions contains the optional parameters for the GlobalRulestackClient.ListCountries +// method. +func (client *GlobalRulestackClient) ListCountries(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientListCountriesOptions) (GlobalRulestackClientListCountriesResponse, error) { + req, err := client.listCountriesCreateRequest(ctx, globalRulestackName, options) + if err != nil { + return GlobalRulestackClientListCountriesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientListCountriesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientListCountriesResponse{}, runtime.NewResponseError(resp) + } + return client.listCountriesHandleResponse(resp) +} + +// listCountriesCreateRequest creates the ListCountries request. 
+func (client *GlobalRulestackClient) listCountriesCreateRequest(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientListCountriesOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/listCountries" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listCountriesHandleResponse handles the ListCountries response. +func (client *GlobalRulestackClient) listCountriesHandleResponse(resp *http.Response) (GlobalRulestackClientListCountriesResponse, error) { + result := GlobalRulestackClientListCountriesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CountriesResponse); err != nil { + return GlobalRulestackClientListCountriesResponse{}, err + } + return result, nil +} + +// ListFirewalls - List of Firewalls associated with Rulestack +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientListFirewallsOptions contains the optional parameters for the GlobalRulestackClient.ListFirewalls +// method. 
+func (client *GlobalRulestackClient) ListFirewalls(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientListFirewallsOptions) (GlobalRulestackClientListFirewallsResponse, error) { + req, err := client.listFirewallsCreateRequest(ctx, globalRulestackName, options) + if err != nil { + return GlobalRulestackClientListFirewallsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientListFirewallsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientListFirewallsResponse{}, runtime.NewResponseError(resp) + } + return client.listFirewallsHandleResponse(resp) +} + +// listFirewallsCreateRequest creates the ListFirewalls request. +func (client *GlobalRulestackClient) listFirewallsCreateRequest(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientListFirewallsOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/listFirewalls" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listFirewallsHandleResponse handles the ListFirewalls response. 
+func (client *GlobalRulestackClient) listFirewallsHandleResponse(resp *http.Response) (GlobalRulestackClientListFirewallsResponse, error) { + result := GlobalRulestackClientListFirewallsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ListFirewallsResponse); err != nil { + return GlobalRulestackClientListFirewallsResponse{}, err + } + return result, nil +} + +// ListPredefinedURLCategories - List predefined URL categories for rulestack +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientListPredefinedURLCategoriesOptions contains the optional parameters for the GlobalRulestackClient.ListPredefinedURLCategories +// method. +func (client *GlobalRulestackClient) ListPredefinedURLCategories(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientListPredefinedURLCategoriesOptions) (GlobalRulestackClientListPredefinedURLCategoriesResponse, error) { + req, err := client.listPredefinedURLCategoriesCreateRequest(ctx, globalRulestackName, options) + if err != nil { + return GlobalRulestackClientListPredefinedURLCategoriesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientListPredefinedURLCategoriesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientListPredefinedURLCategoriesResponse{}, runtime.NewResponseError(resp) + } + return client.listPredefinedURLCategoriesHandleResponse(resp) +} + +// listPredefinedURLCategoriesCreateRequest creates the ListPredefinedURLCategories request. 
+func (client *GlobalRulestackClient) listPredefinedURLCategoriesCreateRequest(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientListPredefinedURLCategoriesOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/listPredefinedUrlCategories" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listPredefinedURLCategoriesHandleResponse handles the ListPredefinedURLCategories response. +func (client *GlobalRulestackClient) listPredefinedURLCategoriesHandleResponse(resp *http.Response) (GlobalRulestackClientListPredefinedURLCategoriesResponse, error) { + result := GlobalRulestackClientListPredefinedURLCategoriesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PredefinedURLCategoriesResponse); err != nil { + return GlobalRulestackClientListPredefinedURLCategoriesResponse{}, err + } + return result, nil +} + +// ListSecurityServices - List the security services for rulestack +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientListSecurityServicesOptions contains the optional parameters for the GlobalRulestackClient.ListSecurityServices +// method. +func (client *GlobalRulestackClient) ListSecurityServices(ctx context.Context, globalRulestackName string, typeParam SecurityServicesTypeEnum, options *GlobalRulestackClientListSecurityServicesOptions) (GlobalRulestackClientListSecurityServicesResponse, error) { + req, err := client.listSecurityServicesCreateRequest(ctx, globalRulestackName, typeParam, options) + if err != nil { + return GlobalRulestackClientListSecurityServicesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientListSecurityServicesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientListSecurityServicesResponse{}, runtime.NewResponseError(resp) + } + return client.listSecurityServicesHandleResponse(resp) +} + +// listSecurityServicesCreateRequest creates the ListSecurityServices request. 
+func (client *GlobalRulestackClient) listSecurityServicesCreateRequest(ctx context.Context, globalRulestackName string, typeParam SecurityServicesTypeEnum, options *GlobalRulestackClientListSecurityServicesOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/listSecurityServices" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + reqQP.Set("type", string(typeParam)) + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSecurityServicesHandleResponse handles the ListSecurityServices response. +func (client *GlobalRulestackClient) listSecurityServicesHandleResponse(resp *http.Response) (GlobalRulestackClientListSecurityServicesResponse, error) { + result := GlobalRulestackClientListSecurityServicesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecurityServicesResponse); err != nil { + return GlobalRulestackClientListSecurityServicesResponse{}, err + } + return result, nil +} + +// Revert - Revert rulestack configuration +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - GlobalRulestackClientRevertOptions contains the optional parameters for the GlobalRulestackClient.Revert method. +func (client *GlobalRulestackClient) Revert(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientRevertOptions) (GlobalRulestackClientRevertResponse, error) { + req, err := client.revertCreateRequest(ctx, globalRulestackName, options) + if err != nil { + return GlobalRulestackClientRevertResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientRevertResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return GlobalRulestackClientRevertResponse{}, runtime.NewResponseError(resp) + } + return GlobalRulestackClientRevertResponse{}, nil +} + +// revertCreateRequest creates the Revert request. +func (client *GlobalRulestackClient) revertCreateRequest(ctx context.Context, globalRulestackName string, options *GlobalRulestackClientRevertOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/revert" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Update - Update a GlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - properties - The resource properties to be updated. +// - options - GlobalRulestackClientUpdateOptions contains the optional parameters for the GlobalRulestackClient.Update method. +func (client *GlobalRulestackClient) Update(ctx context.Context, globalRulestackName string, properties GlobalRulestackResourceUpdate, options *GlobalRulestackClientUpdateOptions) (GlobalRulestackClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, globalRulestackName, properties, options) + if err != nil { + return GlobalRulestackClientUpdateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return GlobalRulestackClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return GlobalRulestackClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. 
+func (client *GlobalRulestackClient) updateCreateRequest(ctx context.Context, globalRulestackName string, properties GlobalRulestackResourceUpdate, options *GlobalRulestackClientUpdateOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, properties) +} + +// updateHandleResponse handles the Update response. +func (client *GlobalRulestackClient) updateHandleResponse(resp *http.Response) (GlobalRulestackClientUpdateResponse, error) { + result := GlobalRulestackClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.GlobalRulestackResource); err != nil { + return GlobalRulestackClientUpdateResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/globalrulestack_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/globalrulestack_client_example_test.go new file mode 100644 index 000000000000..b7cbad9ced12 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/globalrulestack_client_example_test.go @@ -0,0 +1,987 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_List_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_NewListPager_globalRulestackListMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewGlobalRulestackClient().NewListPager(nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.GlobalRulestackResourceListResult = armpanngfw.GlobalRulestackResourceListResult{ + // Value: []*armpanngfw.GlobalRulestackResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // TenantID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // AssociatedSubscriptions: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa")}, + // DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + // MinAppIDVersion: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanLocation: to.Ptr("eastus"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // Scope: to.Ptr(armpanngfw.ScopeTypeLOCAL), + // SecurityServices: &armpanngfw.SecurityServices{ + // AntiSpywareProfile: to.Ptr("aaaaaaaaaa"), + // AntiVirusProfile: to.Ptr("aaaaaaaaaaaaaaaaaaaaaa"), + // DNSSubscription: 
to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // FileBlockingProfile: to.Ptr("aaaaa"), + // OutboundTrustCertificate: to.Ptr("aaaaaa"), + // OutboundUnTrustCertificate: to.Ptr("aaaaaaaa"), + // URLFilteringProfile: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // VulnerabilityProfile: to.Ptr("aaaaaaaaaa"), + // }, + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_List_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_NewListPager_globalRulestackListMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewGlobalRulestackClient().NewListPager(nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.GlobalRulestackResourceListResult = armpanngfw.GlobalRulestackResourceListResult{ + // Value: []*armpanngfw.GlobalRulestackResource{ + // { + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/grs1"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_Get_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_Get_globalRulestackGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().Get(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.GlobalRulestackResource = armpanngfw.GlobalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("globalRulestacks"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // TenantID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // Description: to.Ptr("global rulestacks"), + // AssociatedSubscriptions: []*string{ + // to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + // DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + // MinAppIDVersion: to.Ptr("8.5.3"), + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanLocation: to.Ptr("eastus"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // Scope: to.Ptr(armpanngfw.ScopeTypeGLOBAL), + // SecurityServices: &armpanngfw.SecurityServices{ + // AntiSpywareProfile: to.Ptr("default"), + // AntiVirusProfile: to.Ptr("default"), + // DNSSubscription: to.Ptr("default"), + // FileBlockingProfile: to.Ptr("default"), + // 
OutboundTrustCertificate: to.Ptr("default"), + // OutboundUnTrustCertificate: to.Ptr("default"), + // URLFilteringProfile: to.Ptr("default"), + // VulnerabilityProfile: to.Ptr("default"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_Get_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_Get_globalRulestackGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().Get(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.GlobalRulestackResource = armpanngfw.GlobalRulestackResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_CreateOrUpdate_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_BeginCreateOrUpdate_globalRulestackCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewGlobalRulestackClient().BeginCreateOrUpdate(ctx, "praval", armpanngfw.GlobalRulestackResource{ + Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + "key16": { + ClientID: to.Ptr("aaaa"), + PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + }, + }, + }, + Location: to.Ptr("eastus"), + Properties: &armpanngfw.RulestackProperties{ + Description: to.Ptr("global rulestacks"), + AssociatedSubscriptions: []*string{ + to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + MinAppIDVersion: to.Ptr("8.5.3"), + PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + PanLocation: to.Ptr("eastus"), + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + Scope: to.Ptr(armpanngfw.ScopeTypeGLOBAL), + SecurityServices: &armpanngfw.SecurityServices{ + AntiSpywareProfile: to.Ptr("default"), + 
AntiVirusProfile: to.Ptr("default"), + DNSSubscription: to.Ptr("default"), + FileBlockingProfile: to.Ptr("default"), + OutboundTrustCertificate: to.Ptr("default"), + OutboundUnTrustCertificate: to.Ptr("default"), + URLFilteringProfile: to.Ptr("default"), + VulnerabilityProfile: to.Ptr("default"), + }, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.GlobalRulestackResource = armpanngfw.GlobalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("globalRulestacks"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // TenantID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: 
to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // Description: to.Ptr("global rulestacks"), + // AssociatedSubscriptions: []*string{ + // to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + // DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + // MinAppIDVersion: to.Ptr("8.5.3"), + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanLocation: to.Ptr("eastus"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // Scope: to.Ptr(armpanngfw.ScopeTypeGLOBAL), + // SecurityServices: &armpanngfw.SecurityServices{ + // AntiSpywareProfile: to.Ptr("default"), + // AntiVirusProfile: to.Ptr("default"), + // DNSSubscription: to.Ptr("default"), + // FileBlockingProfile: to.Ptr("default"), + // OutboundTrustCertificate: to.Ptr("default"), + // OutboundUnTrustCertificate: to.Ptr("default"), + // URLFilteringProfile: to.Ptr("default"), + // VulnerabilityProfile: to.Ptr("default"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_CreateOrUpdate_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_BeginCreateOrUpdate_globalRulestackCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewGlobalRulestackClient().BeginCreateOrUpdate(ctx, "praval", armpanngfw.GlobalRulestackResource{ + Location: to.Ptr("eastus"), + Properties: &armpanngfw.RulestackProperties{}, + }, nil) + if err != nil { + log.Fatalf("failed to 
finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.GlobalRulestackResource = armpanngfw.GlobalRulestackResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_Update_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_Update_globalRulestackUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().Update(ctx, "praval", armpanngfw.GlobalRulestackResourceUpdate{ + Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + "key16": { + ClientID: to.Ptr("aaaa"), + PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + }, + }, + }, + Location: to.Ptr("eastus"), + Properties: &armpanngfw.GlobalRulestackResourceUpdateProperties{ + Description: to.Ptr("global rulestacks"), + AssociatedSubscriptions: []*string{ 
+ to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + MinAppIDVersion: to.Ptr("8.5.3"), + PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + PanLocation: to.Ptr("eastus"), + Scope: to.Ptr(armpanngfw.ScopeTypeGLOBAL), + SecurityServices: &armpanngfw.SecurityServices{ + AntiSpywareProfile: to.Ptr("default"), + AntiVirusProfile: to.Ptr("default"), + DNSSubscription: to.Ptr("default"), + FileBlockingProfile: to.Ptr("default"), + OutboundTrustCertificate: to.Ptr("default"), + OutboundUnTrustCertificate: to.Ptr("default"), + URLFilteringProfile: to.Ptr("default"), + VulnerabilityProfile: to.Ptr("default"), + }, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.GlobalRulestackResource = armpanngfw.GlobalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("globalRulestacks"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // TenantID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // Description: to.Ptr("global rulestacks"), + // AssociatedSubscriptions: []*string{ + // to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + // DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + // MinAppIDVersion: to.Ptr("8.5.3"), + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanLocation: to.Ptr("eastus"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // Scope: to.Ptr(armpanngfw.ScopeTypeGLOBAL), + // SecurityServices: &armpanngfw.SecurityServices{ + // AntiSpywareProfile: to.Ptr("default"), + // AntiVirusProfile: to.Ptr("default"), + // DNSSubscription: to.Ptr("default"), + // FileBlockingProfile: to.Ptr("default"), + // 
OutboundTrustCertificate: to.Ptr("default"), + // OutboundUnTrustCertificate: to.Ptr("default"), + // URLFilteringProfile: to.Ptr("default"), + // VulnerabilityProfile: to.Ptr("default"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_Update_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_Update_globalRulestackUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().Update(ctx, "praval", armpanngfw.GlobalRulestackResourceUpdate{}, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.GlobalRulestackResource = armpanngfw.GlobalRulestackResource{ + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_Delete_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_BeginDelete_globalRulestackDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewGlobalRulestackClient().BeginDelete(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_Delete_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_BeginDelete_globalRulestackDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewGlobalRulestackClient().BeginDelete(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = 
poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_commit_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_BeginCommit_globalRulestackCommitMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewGlobalRulestackClient().BeginCommit(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_commit_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_BeginCommit_globalRulestackCommitMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewGlobalRulestackClient().BeginCommit(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + 
log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_getChangeLog_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_GetChangeLog_globalRulestackGetChangeLogMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().GetChangeLog(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.Changelog = armpanngfw.Changelog{ + // Changes: []*string{ + // to.Ptr("aaaa")}, + // LastCommitted: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModified: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_getChangeLog_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_GetChangeLog_globalRulestackGetChangeLogMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().GetChangeLog(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.Changelog = armpanngfw.Changelog{ + // Changes: []*string{ + // to.Ptr("aaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listAdvancedSecurityObjects_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_ListAdvancedSecurityObjects_globalRulestackListAdvancedSecurityObjectsMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListAdvancedSecurityObjects(ctx, "praval", armpanngfw.AdvSecurityObjectTypeEnum("globalRulestacks"), &armpanngfw.GlobalRulestackClientListAdvancedSecurityObjectsOptions{Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.AdvSecurityObjectListResponse = armpanngfw.AdvSecurityObjectListResponse{ + // Value: &armpanngfw.AdvSecurityObjectModel{ + // Type: to.Ptr("globalRulestacks"), + // Entry: []*armpanngfw.NameDescriptionObject{ + // { + // Name: to.Ptr("aaaaaaaaaa"), + // Description: to.Ptr("aaaaaaaaaaaa"), + // }}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listAdvancedSecurityObjects_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_ListAdvancedSecurityObjects_globalRulestackListAdvancedSecurityObjectsMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListAdvancedSecurityObjects(ctx, "praval", armpanngfw.AdvSecurityObjectTypeEnum("globalRulestacks"), &armpanngfw.GlobalRulestackClientListAdvancedSecurityObjectsOptions{Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.AdvSecurityObjectListResponse = armpanngfw.AdvSecurityObjectListResponse{ + // Value: &armpanngfw.AdvSecurityObjectModel{ + // Entry: []*armpanngfw.NameDescriptionObject{ + // { + // Name: to.Ptr("aaaaaaaaaa"), + // }}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listAppIds_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_ListAppIDs_globalRulestackListAppIdsMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListAppIDs(ctx, "praval", &armpanngfw.GlobalRulestackClientListAppIDsOptions{AppIDVersion: to.Ptr("8543"), + AppPrefix: to.Ptr("pref"), + Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ListAppIDResponse = armpanngfw.ListAppIDResponse{ + // Value: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listAppIds_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_ListAppIDs_globalRulestackListAppIdsMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListAppIDs(ctx, "praval", &armpanngfw.GlobalRulestackClientListAppIDsOptions{AppIDVersion: nil, + AppPrefix: nil, + Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ListAppIDResponse = armpanngfw.ListAppIDResponse{ + // Value: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listCountries_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_ListCountries_globalRulestackListCountriesMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListCountries(ctx, "praval", &armpanngfw.GlobalRulestackClientListCountriesOptions{Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.CountriesResponse = armpanngfw.CountriesResponse{ + // Value: []*armpanngfw.Country{ + // { + // Description: to.Ptr("aaaaa"), + // Code: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listCountries_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_ListCountries_globalRulestackListCountriesMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListCountries(ctx, "praval", &armpanngfw.GlobalRulestackClientListCountriesOptions{Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.CountriesResponse = armpanngfw.CountriesResponse{ + // Value: []*armpanngfw.Country{ + // { + // Code: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listFirewalls_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_ListFirewalls_globalRulestackListFirewallsMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListFirewalls(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ListFirewallsResponse = armpanngfw.ListFirewallsResponse{ + // Value: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listFirewalls_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_ListFirewalls_globalRulestackListFirewallsMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListFirewalls(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ListFirewallsResponse = armpanngfw.ListFirewallsResponse{ + // Value: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listPredefinedUrlCategories_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_ListPredefinedURLCategories_globalRulestackListPredefinedUrlCategoriesMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListPredefinedURLCategories(ctx, "praval", &armpanngfw.GlobalRulestackClientListPredefinedURLCategoriesOptions{Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PredefinedURLCategoriesResponse = armpanngfw.PredefinedURLCategoriesResponse{ + // Value: []*armpanngfw.PredefinedURLCategory{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // Action: to.Ptr("aaaaaaa"), + // }}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listPredefinedUrlCategories_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_ListPredefinedURLCategories_globalRulestackListPredefinedUrlCategoriesMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListPredefinedURLCategories(ctx, "praval", &armpanngfw.GlobalRulestackClientListPredefinedURLCategoriesOptions{Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PredefinedURLCategoriesResponse = armpanngfw.PredefinedURLCategoriesResponse{ + // Value: []*armpanngfw.PredefinedURLCategory{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // Action: to.Ptr("aaaaaaa"), + // }}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listSecurityServices_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_ListSecurityServices_globalRulestackListSecurityServicesMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListSecurityServices(ctx, "praval", armpanngfw.SecurityServicesTypeEnum("globalRulestacks"), &armpanngfw.GlobalRulestackClientListSecurityServicesOptions{Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.SecurityServicesResponse = armpanngfw.SecurityServicesResponse{ + // Value: &armpanngfw.SecurityServicesTypeList{ + // Type: to.Ptr("globalRulestacks"), + // Entry: []*armpanngfw.NameDescriptionObject{ + // { + // Name: to.Ptr("aaaaaaaaaa"), + // Description: to.Ptr("aaaaaaaaaaaa"), + // }}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_listSecurityServices_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_ListSecurityServices_globalRulestackListSecurityServicesMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewGlobalRulestackClient().ListSecurityServices(ctx, "praval", armpanngfw.SecurityServicesTypeEnum("globalRulestacks"), &armpanngfw.GlobalRulestackClientListSecurityServicesOptions{Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.SecurityServicesResponse = armpanngfw.SecurityServicesResponse{ + // Value: &armpanngfw.SecurityServicesTypeList{ + // Entry: []*armpanngfw.NameDescriptionObject{ + // { + // Name: to.Ptr("aaaaaaaaaa"), + // }}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_revert_MaximumSet_Gen.json +func ExampleGlobalRulestackClient_Revert_globalRulestackRevertMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewGlobalRulestackClient().Revert(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/GlobalRulestack_revert_MinimumSet_Gen.json +func ExampleGlobalRulestackClient_Revert_globalRulestackRevertMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewGlobalRulestackClient().Revert(ctx, "praval", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/go.mod 
b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/go.mod new file mode 100644 index 000000000000..f6f7fce5bdff --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/go.mod @@ -0,0 +1,21 @@ +module github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw + +go 1.18 + +require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 +) + +require ( + github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + golang.org/x/crypto v0.6.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect +) diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/go.sum b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/go.sum new file mode 100644 index 000000000000..8ba445a8c4da --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/go.sum @@ -0,0 +1,31 @@ +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod 
h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrules_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrules_client.go new file mode 100644 index 000000000000..e8f6b6dd7edd --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrules_client.go @@ -0,0 +1,509 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// LocalRulesClient contains the methods for the LocalRules group. +// Don't use this type directly, use NewLocalRulesClient() instead. +type LocalRulesClient struct { + internal *arm.Client + subscriptionID string +} + +// NewLocalRulesClient creates a new instance of LocalRulesClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. 
+func NewLocalRulesClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LocalRulesClient, error) { + cl, err := arm.NewClient(moduleName+".LocalRulesClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &LocalRulesClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a LocalRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - priority - Local Rule priority +// - resource - Resource create parameters. +// - options - LocalRulesClientBeginCreateOrUpdateOptions contains the optional parameters for the LocalRulesClient.BeginCreateOrUpdate +// method. +func (client *LocalRulesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, resource LocalRulesResource, options *LocalRulesClientBeginCreateOrUpdateOptions) (*runtime.Poller[LocalRulesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, localRulestackName, priority, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LocalRulesClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[LocalRulesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a LocalRulesResource +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +func (client *LocalRulesClient) createOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, resource LocalRulesResource, options *LocalRulesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, localRulestackName, priority, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. +func (client *LocalRulesClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, resource LocalRulesResource, options *LocalRulesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/localRules/{priority}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", 
url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a LocalRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - priority - Local Rule priority +// - options - LocalRulesClientBeginDeleteOptions contains the optional parameters for the LocalRulesClient.BeginDelete method. +func (client *LocalRulesClient) BeginDelete(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientBeginDeleteOptions) (*runtime.Poller[LocalRulesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, localRulestackName, priority, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LocalRulesClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[LocalRulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a LocalRulesResource +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +func (client *LocalRulesClient) deleteOperation(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, localRulestackName, priority, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. +func (client *LocalRulesClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/localRules/{priority}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, 
runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a LocalRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - priority - Local Rule priority +// - options - LocalRulesClientGetOptions contains the optional parameters for the LocalRulesClient.Get method. +func (client *LocalRulesClient) Get(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientGetOptions) (LocalRulesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, localRulestackName, priority, options) + if err != nil { + return LocalRulesClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *LocalRulesClient) getCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/localRules/{priority}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *LocalRulesClient) getHandleResponse(resp *http.Response) (LocalRulesClientGetResponse, error) { + result := LocalRulesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.LocalRulesResource); err != nil { + return LocalRulesClientGetResponse{}, err + } + return result, nil +} + +// GetCounters - Get counters +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - priority - Local Rule priority +// - options - LocalRulesClientGetCountersOptions contains the optional parameters for the LocalRulesClient.GetCounters method. +func (client *LocalRulesClient) GetCounters(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientGetCountersOptions) (LocalRulesClientGetCountersResponse, error) { + req, err := client.getCountersCreateRequest(ctx, resourceGroupName, localRulestackName, priority, options) + if err != nil { + return LocalRulesClientGetCountersResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulesClientGetCountersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulesClientGetCountersResponse{}, runtime.NewResponseError(resp) + } + return client.getCountersHandleResponse(resp) +} + +// getCountersCreateRequest creates the GetCounters request. 
+func (client *LocalRulesClient) getCountersCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientGetCountersOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/localRules/{priority}/getCounters" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.FirewallName != nil { + reqQP.Set("firewallName", *options.FirewallName) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getCountersHandleResponse handles the GetCounters response. 
+func (client *LocalRulesClient) getCountersHandleResponse(resp *http.Response) (LocalRulesClientGetCountersResponse, error) { + result := LocalRulesClientGetCountersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RuleCounter); err != nil { + return LocalRulesClientGetCountersResponse{}, err + } + return result, nil +} + +// NewListByLocalRulestacksPager - List LocalRulesResource resources by LocalRulestacks +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulesClientListByLocalRulestacksOptions contains the optional parameters for the LocalRulesClient.NewListByLocalRulestacksPager +// method. +func (client *LocalRulesClient) NewListByLocalRulestacksPager(resourceGroupName string, localRulestackName string, options *LocalRulesClientListByLocalRulestacksOptions) *runtime.Pager[LocalRulesClientListByLocalRulestacksResponse] { + return runtime.NewPager(runtime.PagingHandler[LocalRulesClientListByLocalRulestacksResponse]{ + More: func(page LocalRulesClientListByLocalRulestacksResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *LocalRulesClientListByLocalRulestacksResponse) (LocalRulesClientListByLocalRulestacksResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByLocalRulestacksCreateRequest(ctx, resourceGroupName, localRulestackName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return LocalRulesClientListByLocalRulestacksResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulesClientListByLocalRulestacksResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulesClientListByLocalRulestacksResponse{}, 
runtime.NewResponseError(resp) + } + return client.listByLocalRulestacksHandleResponse(resp) + }, + }) +} + +// listByLocalRulestacksCreateRequest creates the ListByLocalRulestacks request. +func (client *LocalRulesClient) listByLocalRulestacksCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulesClientListByLocalRulestacksOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/localRules" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByLocalRulestacksHandleResponse handles the ListByLocalRulestacks response. 
+func (client *LocalRulesClient) listByLocalRulestacksHandleResponse(resp *http.Response) (LocalRulesClientListByLocalRulestacksResponse, error) { + result := LocalRulesClientListByLocalRulestacksResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.LocalRulesResourceListResult); err != nil { + return LocalRulesClientListByLocalRulestacksResponse{}, err + } + return result, nil +} + +// RefreshCounters - Refresh counters +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - priority - Local Rule priority +// - options - LocalRulesClientRefreshCountersOptions contains the optional parameters for the LocalRulesClient.RefreshCounters +// method. +func (client *LocalRulesClient) RefreshCounters(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientRefreshCountersOptions) (LocalRulesClientRefreshCountersResponse, error) { + req, err := client.refreshCountersCreateRequest(ctx, resourceGroupName, localRulestackName, priority, options) + if err != nil { + return LocalRulesClientRefreshCountersResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulesClientRefreshCountersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return LocalRulesClientRefreshCountersResponse{}, runtime.NewResponseError(resp) + } + return LocalRulesClientRefreshCountersResponse{}, nil +} + +// refreshCountersCreateRequest creates the RefreshCounters request. 
+func (client *LocalRulesClient) refreshCountersCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientRefreshCountersOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/localRules/{priority}/refreshCounters" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.FirewallName != nil { + reqQP.Set("firewallName", *options.FirewallName) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// ResetCounters - Reset counters +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - localRulestackName - LocalRulestack resource name +// - priority - Local Rule priority +// - options - LocalRulesClientResetCountersOptions contains the optional parameters for the LocalRulesClient.ResetCounters +// method. +func (client *LocalRulesClient) ResetCounters(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientResetCountersOptions) (LocalRulesClientResetCountersResponse, error) { + req, err := client.resetCountersCreateRequest(ctx, resourceGroupName, localRulestackName, priority, options) + if err != nil { + return LocalRulesClientResetCountersResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulesClientResetCountersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulesClientResetCountersResponse{}, runtime.NewResponseError(resp) + } + return client.resetCountersHandleResponse(resp) +} + +// resetCountersCreateRequest creates the ResetCounters request. 
+func (client *LocalRulesClient) resetCountersCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, priority string, options *LocalRulesClientResetCountersOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/localRules/{priority}/resetCounters" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.FirewallName != nil { + reqQP.Set("firewallName", *options.FirewallName) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// resetCountersHandleResponse handles the ResetCounters response. 
+func (client *LocalRulesClient) resetCountersHandleResponse(resp *http.Response) (LocalRulesClientResetCountersResponse, error) { + result := LocalRulesClientResetCountersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RuleCounterReset); err != nil { + return LocalRulesClientResetCountersResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrules_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrules_client_example_test.go new file mode 100644 index 000000000000..a27d8a63a149 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrules_client_example_test.go @@ -0,0 +1,629 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_ListByLocalRulestacks_MaximumSet_Gen.json +func ExampleLocalRulesClient_NewListByLocalRulestacksPager_localRulesListByLocalRulestacksMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewLocalRulesClient().NewListByLocalRulestacksPager("firewall-rg", "lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.LocalRulesResourceListResult = armpanngfw.LocalRulesResourceListResult{ + // Value: []*armpanngfw.LocalRulesResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("a"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.RuleEntry{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + // Applications: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // AuditComment: to.Ptr("aaa"), + // Category: &armpanngfw.Category{ + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // URLCustom: []*string{ + // to.Ptr("aaaaa")}, + // }, + // DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + // Destination: &armpanngfw.DestinationAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaaaaaaaaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // FqdnLists: []*string{ + // to.Ptr("aaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // }, + // EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // InboundInspectionCertificate: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + // NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + // Priority: to.Ptr[int32](24), + // ProtocolPortList: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // 
ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + // Source: &armpanngfw.SourceAddr{ + // Cidrs: []*string{ + // to.Ptr("aaa")}, + // Countries: []*string{ + // to.Ptr("aaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaa")}, + // }, + // Tags: []*armpanngfw.TagInfo{ + // { + // Key: to.Ptr("keyName"), + // Value: to.Ptr("value"), + // }}, + // Protocol: to.Ptr("aaaa"), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_ListByLocalRulestacks_MinimumSet_Gen.json +func ExampleLocalRulesClient_NewListByLocalRulestacksPager_localRulesListByLocalRulestacksMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewLocalRulesClient().NewListByLocalRulestacksPager("firewall-rg", "lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.LocalRulesResourceListResult = armpanngfw.LocalRulesResourceListResult{ + // Value: []*armpanngfw.LocalRulesResource{ + // { + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/lrs1/localrules/1"), + // Properties: &armpanngfw.RuleEntry{ + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_Get_MaximumSet_Gen.json +func ExampleLocalRulesClient_Get_localRulesGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulesClient().Get(ctx, "firewall-rg", "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.LocalRulesResource = armpanngfw.LocalRulesResource{ + // Name: to.Ptr("aaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.RuleEntry{ + // Description: to.Ptr("aaaaaaa"), + // ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + // Applications: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // AuditComment: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // Category: &armpanngfw.Category{ + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaa")}, + // URLCustom: []*string{ + // to.Ptr("aa")}, + // }, + // DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + // Destination: &armpanngfw.DestinationAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaa")}, + // FqdnLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaa")}, + // }, + // EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // InboundInspectionCertificate: to.Ptr("aaaaaaaaaaaaaaaa"), + // NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + // NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + // Priority: to.Ptr[int32](13), + // ProtocolPortList: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaa")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // RuleName: to.Ptr("aaaaaa"), + // 
RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + // Source: &armpanngfw.SourceAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaa")}, + // }, + // Tags: []*armpanngfw.TagInfo{ + // { + // Key: to.Ptr("keyName"), + // Value: to.Ptr("value"), + // }}, + // Protocol: to.Ptr("aaaaaaaaaaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_Get_MinimumSet_Gen.json +func ExampleLocalRulesClient_Get_localRulesGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulesClient().Get(ctx, "firewall-rg", "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.LocalRulesResource = armpanngfw.LocalRulesResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/grs1/localrules/1"), + // Properties: &armpanngfw.RuleEntry{ + // RuleName: to.Ptr("aaaaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_CreateOrUpdate_MaximumSet_Gen.json +func ExampleLocalRulesClient_BeginCreateOrUpdate_localRulesCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewLocalRulesClient().BeginCreateOrUpdate(ctx, "firewall-rg", "lrs1", "1", armpanngfw.LocalRulesResource{ + Properties: &armpanngfw.RuleEntry{ + Description: to.Ptr("description of local rule"), + ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + Applications: []*string{ + to.Ptr("app1")}, + AuditComment: to.Ptr("example comment"), + Category: &armpanngfw.Category{ + Feeds: []*string{ + to.Ptr("feed")}, + URLCustom: []*string{ + to.Ptr("https://microsoft.com")}, + }, + DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + Destination: &armpanngfw.DestinationAddr{ + Cidrs: []*string{ + to.Ptr("1.0.0.1/10")}, + Countries: []*string{ + to.Ptr("India")}, + Feeds: []*string{ + to.Ptr("feed")}, + FqdnLists: []*string{ + to.Ptr("FQDN1")}, + PrefixLists: []*string{ + to.Ptr("PL1")}, + }, + EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + Etag: to.Ptr("c18e6eef-ba3e-49ee-8a85-2b36c863a9d0"), + 
InboundInspectionCertificate: to.Ptr("cert1"), + NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + ProtocolPortList: []*string{ + to.Ptr("80")}, + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + RuleName: to.Ptr("localRule1"), + RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + Source: &armpanngfw.SourceAddr{ + Cidrs: []*string{ + to.Ptr("1.0.0.1/10")}, + Countries: []*string{ + to.Ptr("India")}, + Feeds: []*string{ + to.Ptr("feed")}, + PrefixLists: []*string{ + to.Ptr("PL1")}, + }, + Tags: []*armpanngfw.TagInfo{ + { + Key: to.Ptr("keyName"), + Value: to.Ptr("value"), + }}, + Protocol: to.Ptr("HTTP"), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.LocalRulesResource = armpanngfw.LocalRulesResource{ + // Name: to.Ptr("aaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.RuleEntry{ + // Description: to.Ptr("aaaaaaa"), + // ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + // Applications: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // AuditComment: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // Category: &armpanngfw.Category{ + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaa")}, + // URLCustom: []*string{ + // to.Ptr("aa")}, + // }, + // DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + // Destination: &armpanngfw.DestinationAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaa")}, + // FqdnLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaa")}, + // }, + // EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // InboundInspectionCertificate: to.Ptr("aaaaaaaaaaaaaaaa"), + // NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + // NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + // Priority: to.Ptr[int32](13), + // ProtocolPortList: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaa")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // RuleName: to.Ptr("aaaaaa"), + // 
RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + // Source: &armpanngfw.SourceAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaa")}, + // }, + // Tags: []*armpanngfw.TagInfo{ + // { + // Key: to.Ptr("keyName"), + // Value: to.Ptr("value"), + // }}, + // Protocol: to.Ptr("aaaaaaaaaaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_CreateOrUpdate_MinimumSet_Gen.json +func ExampleLocalRulesClient_BeginCreateOrUpdate_localRulesCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewLocalRulesClient().BeginCreateOrUpdate(ctx, "firewall-rg", "lrs1", "1", armpanngfw.LocalRulesResource{ + Properties: &armpanngfw.RuleEntry{ + RuleName: to.Ptr("localRule1"), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.LocalRulesResource = armpanngfw.LocalRulesResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/grs1/localrules/1"), + // Properties: &armpanngfw.RuleEntry{ + // RuleName: to.Ptr("aaaaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_Delete_MaximumSet_Gen.json +func ExampleLocalRulesClient_BeginDelete_localRulesDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewLocalRulesClient().BeginDelete(ctx, "firewall-rg", "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_Delete_MinimumSet_Gen.json +func ExampleLocalRulesClient_BeginDelete_localRulesDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := 
clientFactory.NewLocalRulesClient().BeginDelete(ctx, "firewall-rg", "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_getCounters_MaximumSet_Gen.json +func ExampleLocalRulesClient_GetCounters_localRulesGetCountersMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulesClient().GetCounters(ctx, "firewall-rg", "lrs1", "1", &armpanngfw.LocalRulesClientGetCountersOptions{FirewallName: to.Ptr("firewall1")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.RuleCounter = armpanngfw.RuleCounter{ + // AppSeen: &armpanngfw.AppSeenData{ + // AppSeenList: []*armpanngfw.AppSeenInfo{ + // { + // Category: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // Risk: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // StandardPorts: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // SubCategory: to.Ptr("aaaaaaaaaaaaaaaaa"), + // Tag: to.Ptr("aaaaaaaaaa"), + // Technology: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // Title: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // }}, + // Count: to.Ptr[int32](13), + // }, + // FirewallName: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // HitCount: to.Ptr[int32](20), + // LastUpdatedTimestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // Priority: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // RequestTimestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // RuleListName: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // RuleName: to.Ptr("aaaa"), + // RuleStackName: to.Ptr("aaaaaaaaaaaaaaaaa"), + // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_getCounters_MinimumSet_Gen.json +func ExampleLocalRulesClient_GetCounters_localRulesGetCountersMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulesClient().GetCounters(ctx, "firewall-rg", "lrs1", "1", &armpanngfw.LocalRulesClientGetCountersOptions{FirewallName: 
nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.RuleCounter = armpanngfw.RuleCounter{ + // Priority: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // RuleName: to.Ptr("aaaa"), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_refreshCounters_MaximumSet_Gen.json +func ExampleLocalRulesClient_RefreshCounters_localRulesRefreshCountersMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewLocalRulesClient().RefreshCounters(ctx, "firewall-rg", "lrs1", "1", &armpanngfw.LocalRulesClientRefreshCountersOptions{FirewallName: to.Ptr("firewall1")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_refreshCounters_MinimumSet_Gen.json +func ExampleLocalRulesClient_RefreshCounters_localRulesRefreshCountersMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + 
ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewLocalRulesClient().RefreshCounters(ctx, "firewall-rg", "lrs1", "1", &armpanngfw.LocalRulesClientRefreshCountersOptions{FirewallName: nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_resetCounters_MaximumSet_Gen.json +func ExampleLocalRulesClient_ResetCounters_localRulesResetCountersMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulesClient().ResetCounters(ctx, "firewall-rg", "lrs1", "1", &armpanngfw.LocalRulesClientResetCountersOptions{FirewallName: to.Ptr("firewall1")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.RuleCounterReset = armpanngfw.RuleCounterReset{ + // FirewallName: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // Priority: to.Ptr("aaaaaaa"), + // RuleListName: to.Ptr("aaaaa"), + // RuleName: to.Ptr("aaaaa"), + // RuleStackName: to.Ptr("aa"), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRules_resetCounters_MinimumSet_Gen.json +func ExampleLocalRulesClient_ResetCounters_localRulesResetCountersMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulesClient().ResetCounters(ctx, "firewall-rg", "lrs1", "1", &armpanngfw.LocalRulesClientResetCountersOptions{FirewallName: nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.RuleCounterReset = armpanngfw.RuleCounterReset{ + // } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrulestacks_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrulestacks_client.go new file mode 100644 index 000000000000..61d9b4d69c4e --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrulestacks_client.go @@ -0,0 +1,1041 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strconv" + "strings" +) + +// LocalRulestacksClient contains the methods for the LocalRulestacks group. +// Don't use this type directly, use NewLocalRulestacksClient() instead. +type LocalRulestacksClient struct { + internal *arm.Client + subscriptionID string +} + +// NewLocalRulestacksClient creates a new instance of LocalRulestacksClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewLocalRulestacksClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*LocalRulestacksClient, error) { + cl, err := arm.NewClient(moduleName+".LocalRulestacksClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &LocalRulestacksClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCommit - Commit rulestack configuration +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientBeginCommitOptions contains the optional parameters for the LocalRulestacksClient.BeginCommit +// method. +func (client *LocalRulestacksClient) BeginCommit(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientBeginCommitOptions) (*runtime.Poller[LocalRulestacksClientCommitResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.commit(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LocalRulestacksClientCommitResponse]{ + FinalStateVia: runtime.FinalStateViaLocation, + }) + } else { + return runtime.NewPollerFromResumeToken[LocalRulestacksClientCommitResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Commit - Commit rulestack configuration +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *LocalRulestacksClient) commit(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientBeginCommitOptions) (*http.Response, error) { + req, err := client.commitCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// commitCreateRequest creates the Commit request. 
+func (client *LocalRulestacksClient) commitCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientBeginCommitOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/commit" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// BeginCreateOrUpdate - Create a LocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - resource - Resource create parameters. +// - options - LocalRulestacksClientBeginCreateOrUpdateOptions contains the optional parameters for the LocalRulestacksClient.BeginCreateOrUpdate +// method. 
+func (client *LocalRulestacksClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, resource LocalRulestackResource, options *LocalRulestacksClientBeginCreateOrUpdateOptions) (*runtime.Poller[LocalRulestacksClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, localRulestackName, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LocalRulestacksClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[LocalRulestacksClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a LocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *LocalRulestacksClient) createOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, resource LocalRulestackResource, options *LocalRulestacksClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, localRulestackName, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *LocalRulestacksClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, resource LocalRulestackResource, options *LocalRulestacksClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a LocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientBeginDeleteOptions contains the optional parameters for the LocalRulestacksClient.BeginDelete +// method. 
+func (client *LocalRulestacksClient) BeginDelete(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientBeginDeleteOptions) (*runtime.Poller[LocalRulestacksClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[LocalRulestacksClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[LocalRulestacksClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a LocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *LocalRulestacksClient) deleteOperation(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *LocalRulestacksClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a LocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientGetOptions contains the optional parameters for the LocalRulestacksClient.Get method. 
+func (client *LocalRulestacksClient) Get(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetOptions) (LocalRulestacksClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return LocalRulestacksClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *LocalRulestacksClient) getCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = 
[]string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *LocalRulestacksClient) getHandleResponse(resp *http.Response) (LocalRulestacksClientGetResponse, error) { + result := LocalRulestacksClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResource); err != nil { + return LocalRulestacksClientGetResponse{}, err + } + return result, nil +} + +// GetChangeLog - Get changelog +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientGetChangeLogOptions contains the optional parameters for the LocalRulestacksClient.GetChangeLog +// method. +func (client *LocalRulestacksClient) GetChangeLog(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetChangeLogOptions) (LocalRulestacksClientGetChangeLogResponse, error) { + req, err := client.getChangeLogCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return LocalRulestacksClientGetChangeLogResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientGetChangeLogResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientGetChangeLogResponse{}, runtime.NewResponseError(resp) + } + return client.getChangeLogHandleResponse(resp) +} + +// getChangeLogCreateRequest creates the GetChangeLog request. 
+func (client *LocalRulestacksClient) getChangeLogCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetChangeLogOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/getChangeLog" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getChangeLogHandleResponse handles the GetChangeLog response. +func (client *LocalRulestacksClient) getChangeLogHandleResponse(resp *http.Response) (LocalRulestacksClientGetChangeLogResponse, error) { + result := LocalRulestacksClientGetChangeLogResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.Changelog); err != nil { + return LocalRulestacksClientGetChangeLogResponse{}, err + } + return result, nil +} + +// GetSupportInfo - support info for rulestack. +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientGetSupportInfoOptions contains the optional parameters for the LocalRulestacksClient.GetSupportInfo +// method. +func (client *LocalRulestacksClient) GetSupportInfo(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetSupportInfoOptions) (LocalRulestacksClientGetSupportInfoResponse, error) { + req, err := client.getSupportInfoCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return LocalRulestacksClientGetSupportInfoResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientGetSupportInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientGetSupportInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getSupportInfoHandleResponse(resp) +} + +// getSupportInfoCreateRequest creates the GetSupportInfo request. 
+func (client *LocalRulestacksClient) getSupportInfoCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientGetSupportInfoOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/getSupportInfo" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Email != nil { + reqQP.Set("email", *options.Email) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getSupportInfoHandleResponse handles the GetSupportInfo response. 
+func (client *LocalRulestacksClient) getSupportInfoHandleResponse(resp *http.Response) (LocalRulestacksClientGetSupportInfoResponse, error) { + result := LocalRulestacksClientGetSupportInfoResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SupportInfo); err != nil { + return LocalRulestacksClientGetSupportInfoResponse{}, err + } + return result, nil +} + +// ListAdvancedSecurityObjects - Get the list of advanced security objects +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientListAdvancedSecurityObjectsOptions contains the optional parameters for the LocalRulestacksClient.ListAdvancedSecurityObjects +// method. +func (client *LocalRulestacksClient) ListAdvancedSecurityObjects(ctx context.Context, resourceGroupName string, localRulestackName string, typeParam AdvSecurityObjectTypeEnum, options *LocalRulestacksClientListAdvancedSecurityObjectsOptions) (LocalRulestacksClientListAdvancedSecurityObjectsResponse, error) { + req, err := client.listAdvancedSecurityObjectsCreateRequest(ctx, resourceGroupName, localRulestackName, typeParam, options) + if err != nil { + return LocalRulestacksClientListAdvancedSecurityObjectsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientListAdvancedSecurityObjectsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientListAdvancedSecurityObjectsResponse{}, runtime.NewResponseError(resp) + } + return client.listAdvancedSecurityObjectsHandleResponse(resp) +} + +// listAdvancedSecurityObjectsCreateRequest creates the ListAdvancedSecurityObjects request. 
+func (client *LocalRulestacksClient) listAdvancedSecurityObjectsCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, typeParam AdvSecurityObjectTypeEnum, options *LocalRulestacksClientListAdvancedSecurityObjectsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listAdvancedSecurityObjects" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + reqQP.Set("type", string(typeParam)) + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAdvancedSecurityObjectsHandleResponse handles the ListAdvancedSecurityObjects response. 
+func (client *LocalRulestacksClient) listAdvancedSecurityObjectsHandleResponse(resp *http.Response) (LocalRulestacksClientListAdvancedSecurityObjectsResponse, error) { + result := LocalRulestacksClientListAdvancedSecurityObjectsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.AdvSecurityObjectListResponse); err != nil { + return LocalRulestacksClientListAdvancedSecurityObjectsResponse{}, err + } + return result, nil +} + +// ListAppIDs - List of AppIds for LocalRulestack ApiVersion +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientListAppIDsOptions contains the optional parameters for the LocalRulestacksClient.ListAppIDs +// method. +func (client *LocalRulestacksClient) ListAppIDs(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListAppIDsOptions) (LocalRulestacksClientListAppIDsResponse, error) { + req, err := client.listAppIDsCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return LocalRulestacksClientListAppIDsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientListAppIDsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientListAppIDsResponse{}, runtime.NewResponseError(resp) + } + return client.listAppIDsHandleResponse(resp) +} + +// listAppIDsCreateRequest creates the ListAppIDs request. 
+func (client *LocalRulestacksClient) listAppIDsCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListAppIDsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listAppIds" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.AppIDVersion != nil { + reqQP.Set("appIdVersion", *options.AppIDVersion) + } + if options != nil && options.AppPrefix != nil { + reqQP.Set("appPrefix", *options.AppPrefix) + } + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listAppIDsHandleResponse handles the ListAppIDs response. 
+func (client *LocalRulestacksClient) listAppIDsHandleResponse(resp *http.Response) (LocalRulestacksClientListAppIDsResponse, error) { + result := LocalRulestacksClientListAppIDsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ListAppIDResponse); err != nil { + return LocalRulestacksClientListAppIDsResponse{}, err + } + return result, nil +} + +// NewListByResourceGroupPager - List LocalRulestackResource resources by resource group +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - options - LocalRulestacksClientListByResourceGroupOptions contains the optional parameters for the LocalRulestacksClient.NewListByResourceGroupPager +// method. +func (client *LocalRulestacksClient) NewListByResourceGroupPager(resourceGroupName string, options *LocalRulestacksClientListByResourceGroupOptions) *runtime.Pager[LocalRulestacksClientListByResourceGroupResponse] { + return runtime.NewPager(runtime.PagingHandler[LocalRulestacksClientListByResourceGroupResponse]{ + More: func(page LocalRulestacksClientListByResourceGroupResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *LocalRulestacksClientListByResourceGroupResponse) (LocalRulestacksClientListByResourceGroupResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return LocalRulestacksClientListByResourceGroupResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientListByResourceGroupResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientListByResourceGroupResponse{}, runtime.NewResponseError(resp) + } + return 
client.listByResourceGroupHandleResponse(resp) + }, + }) +} + +// listByResourceGroupCreateRequest creates the ListByResourceGroup request. +func (client *LocalRulestacksClient) listByResourceGroupCreateRequest(ctx context.Context, resourceGroupName string, options *LocalRulestacksClientListByResourceGroupOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByResourceGroupHandleResponse handles the ListByResourceGroup response. 
+func (client *LocalRulestacksClient) listByResourceGroupHandleResponse(resp *http.Response) (LocalRulestacksClientListByResourceGroupResponse, error) { + result := LocalRulestacksClientListByResourceGroupResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResourceListResult); err != nil { + return LocalRulestacksClientListByResourceGroupResponse{}, err + } + return result, nil +} + +// NewListBySubscriptionPager - List LocalRulestackResource resources by subscription ID +// +// Generated from API version 2022-08-29-preview +// - options - LocalRulestacksClientListBySubscriptionOptions contains the optional parameters for the LocalRulestacksClient.NewListBySubscriptionPager +// method. +func (client *LocalRulestacksClient) NewListBySubscriptionPager(options *LocalRulestacksClientListBySubscriptionOptions) *runtime.Pager[LocalRulestacksClientListBySubscriptionResponse] { + return runtime.NewPager(runtime.PagingHandler[LocalRulestacksClientListBySubscriptionResponse]{ + More: func(page LocalRulestacksClientListBySubscriptionResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *LocalRulestacksClientListBySubscriptionResponse) (LocalRulestacksClientListBySubscriptionResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listBySubscriptionCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return LocalRulestacksClientListBySubscriptionResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientListBySubscriptionResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientListBySubscriptionResponse{}, runtime.NewResponseError(resp) + } + return client.listBySubscriptionHandleResponse(resp) + }, + }) +} + +// listBySubscriptionCreateRequest creates the 
ListBySubscription request. +func (client *LocalRulestacksClient) listBySubscriptionCreateRequest(ctx context.Context, options *LocalRulestacksClientListBySubscriptionOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listBySubscriptionHandleResponse handles the ListBySubscription response. +func (client *LocalRulestacksClient) listBySubscriptionHandleResponse(resp *http.Response) (LocalRulestacksClientListBySubscriptionResponse, error) { + result := LocalRulestacksClientListBySubscriptionResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResourceListResult); err != nil { + return LocalRulestacksClientListBySubscriptionResponse{}, err + } + return result, nil +} + +// ListCountries - List of countries for Rulestack +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientListCountriesOptions contains the optional parameters for the LocalRulestacksClient.ListCountries +// method. 
+func (client *LocalRulestacksClient) ListCountries(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListCountriesOptions) (LocalRulestacksClientListCountriesResponse, error) { + req, err := client.listCountriesCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return LocalRulestacksClientListCountriesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientListCountriesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientListCountriesResponse{}, runtime.NewResponseError(resp) + } + return client.listCountriesHandleResponse(resp) +} + +// listCountriesCreateRequest creates the ListCountries request. +func (client *LocalRulestacksClient) listCountriesCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListCountriesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listCountries" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := 
req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listCountriesHandleResponse handles the ListCountries response. +func (client *LocalRulestacksClient) listCountriesHandleResponse(resp *http.Response) (LocalRulestacksClientListCountriesResponse, error) { + result := LocalRulestacksClientListCountriesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.CountriesResponse); err != nil { + return LocalRulestacksClientListCountriesResponse{}, err + } + return result, nil +} + +// ListFirewalls - List of Firewalls associated with Rulestack +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientListFirewallsOptions contains the optional parameters for the LocalRulestacksClient.ListFirewalls +// method. 
+func (client *LocalRulestacksClient) ListFirewalls(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListFirewallsOptions) (LocalRulestacksClientListFirewallsResponse, error) { + req, err := client.listFirewallsCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return LocalRulestacksClientListFirewallsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientListFirewallsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientListFirewallsResponse{}, runtime.NewResponseError(resp) + } + return client.listFirewallsHandleResponse(resp) +} + +// listFirewallsCreateRequest creates the ListFirewalls request. +func (client *LocalRulestacksClient) listFirewallsCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListFirewallsOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listFirewalls" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := 
req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listFirewallsHandleResponse handles the ListFirewalls response. +func (client *LocalRulestacksClient) listFirewallsHandleResponse(resp *http.Response) (LocalRulestacksClientListFirewallsResponse, error) { + result := LocalRulestacksClientListFirewallsResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.ListFirewallsResponse); err != nil { + return LocalRulestacksClientListFirewallsResponse{}, err + } + return result, nil +} + +// ListPredefinedURLCategories - List predefined URL categories for rulestack +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientListPredefinedURLCategoriesOptions contains the optional parameters for the LocalRulestacksClient.ListPredefinedURLCategories +// method. 
+func (client *LocalRulestacksClient) ListPredefinedURLCategories(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListPredefinedURLCategoriesOptions) (LocalRulestacksClientListPredefinedURLCategoriesResponse, error) { + req, err := client.listPredefinedURLCategoriesCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return LocalRulestacksClientListPredefinedURLCategoriesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientListPredefinedURLCategoriesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientListPredefinedURLCategoriesResponse{}, runtime.NewResponseError(resp) + } + return client.listPredefinedURLCategoriesHandleResponse(resp) +} + +// listPredefinedURLCategoriesCreateRequest creates the ListPredefinedURLCategories request. +func (client *LocalRulestacksClient) listPredefinedURLCategoriesCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientListPredefinedURLCategoriesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listPredefinedUrlCategories" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", 
url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listPredefinedURLCategoriesHandleResponse handles the ListPredefinedURLCategories response. +func (client *LocalRulestacksClient) listPredefinedURLCategoriesHandleResponse(resp *http.Response) (LocalRulestacksClientListPredefinedURLCategoriesResponse, error) { + result := LocalRulestacksClientListPredefinedURLCategoriesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PredefinedURLCategoriesResponse); err != nil { + return LocalRulestacksClientListPredefinedURLCategoriesResponse{}, err + } + return result, nil +} + +// ListSecurityServices - List the security services for rulestack +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientListSecurityServicesOptions contains the optional parameters for the LocalRulestacksClient.ListSecurityServices +// method. 
+func (client *LocalRulestacksClient) ListSecurityServices(ctx context.Context, resourceGroupName string, localRulestackName string, typeParam SecurityServicesTypeEnum, options *LocalRulestacksClientListSecurityServicesOptions) (LocalRulestacksClientListSecurityServicesResponse, error) { + req, err := client.listSecurityServicesCreateRequest(ctx, resourceGroupName, localRulestackName, typeParam, options) + if err != nil { + return LocalRulestacksClientListSecurityServicesResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientListSecurityServicesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientListSecurityServicesResponse{}, runtime.NewResponseError(resp) + } + return client.listSecurityServicesHandleResponse(resp) +} + +// listSecurityServicesCreateRequest creates the ListSecurityServices request. +func (client *LocalRulestacksClient) listSecurityServicesCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, typeParam SecurityServicesTypeEnum, options *LocalRulestacksClientListSecurityServicesOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/listSecurityServices" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + 
req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.Skip != nil { + reqQP.Set("skip", *options.Skip) + } + if options != nil && options.Top != nil { + reqQP.Set("top", strconv.FormatInt(int64(*options.Top), 10)) + } + reqQP.Set("type", string(typeParam)) + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listSecurityServicesHandleResponse handles the ListSecurityServices response. +func (client *LocalRulestacksClient) listSecurityServicesHandleResponse(resp *http.Response) (LocalRulestacksClientListSecurityServicesResponse, error) { + result := LocalRulestacksClientListSecurityServicesResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.SecurityServicesResponse); err != nil { + return LocalRulestacksClientListSecurityServicesResponse{}, err + } + return result, nil +} + +// Revert - Revert rulestack configuration +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - LocalRulestacksClientRevertOptions contains the optional parameters for the LocalRulestacksClient.Revert method. 
+func (client *LocalRulestacksClient) Revert(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientRevertOptions) (LocalRulestacksClientRevertResponse, error) { + req, err := client.revertCreateRequest(ctx, resourceGroupName, localRulestackName, options) + if err != nil { + return LocalRulestacksClientRevertResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientRevertResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return LocalRulestacksClientRevertResponse{}, runtime.NewResponseError(resp) + } + return LocalRulestacksClientRevertResponse{}, nil +} + +// revertCreateRequest creates the Revert request. +func (client *LocalRulestacksClient) revertCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *LocalRulestacksClientRevertOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/revert" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + 
req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Update - Update a LocalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - properties - The resource properties to be updated. +// - options - LocalRulestacksClientUpdateOptions contains the optional parameters for the LocalRulestacksClient.Update method. +func (client *LocalRulestacksClient) Update(ctx context.Context, resourceGroupName string, localRulestackName string, properties LocalRulestackResourceUpdate, options *LocalRulestacksClientUpdateOptions) (LocalRulestacksClientUpdateResponse, error) { + req, err := client.updateCreateRequest(ctx, resourceGroupName, localRulestackName, properties, options) + if err != nil { + return LocalRulestacksClientUpdateResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return LocalRulestacksClientUpdateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return LocalRulestacksClientUpdateResponse{}, runtime.NewResponseError(resp) + } + return client.updateHandleResponse(resp) +} + +// updateCreateRequest creates the Update request. 
+func (client *LocalRulestacksClient) updateCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, properties LocalRulestackResourceUpdate, options *LocalRulestacksClientUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodPatch, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, properties) +} + +// updateHandleResponse handles the Update response. 
+func (client *LocalRulestacksClient) updateHandleResponse(resp *http.Response) (LocalRulestacksClientUpdateResponse, error) { + result := LocalRulestacksClientUpdateResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.LocalRulestackResource); err != nil { + return LocalRulestacksClientUpdateResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrulestacks_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrulestacks_client_example_test.go new file mode 100644 index 000000000000..abb88a6d7f04 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/localrulestacks_client_example_test.go @@ -0,0 +1,1171 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_ListBySubscription_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_NewListBySubscriptionPager_localRulestacksListBySubscriptionMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewLocalRulestacksClient().NewListBySubscriptionPager(nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.LocalRulestackResourceListResult = armpanngfw.LocalRulestackResourceListResult{ + // Value: []*armpanngfw.LocalRulestackResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // TenantID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.RulestackProperties{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // AssociatedSubscriptions: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa")}, + // DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + // MinAppIDVersion: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanLocation: to.Ptr("eastus"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // Scope: to.Ptr(armpanngfw.ScopeTypeLOCAL), + // SecurityServices: &armpanngfw.SecurityServices{ + // AntiSpywareProfile: to.Ptr("aaaaaaaaaa"), + // 
AntiVirusProfile: to.Ptr("aaaaaaaaaaaaaaaaaaaaaa"), + // DNSSubscription: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // FileBlockingProfile: to.Ptr("aaaaa"), + // OutboundTrustCertificate: to.Ptr("aaaaaa"), + // OutboundUnTrustCertificate: to.Ptr("aaaaaaaa"), + // URLFilteringProfile: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // VulnerabilityProfile: to.Ptr("aaaaaaaaaa"), + // }, + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_ListBySubscription_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_NewListBySubscriptionPager_localRulestacksListBySubscriptionMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewLocalRulestacksClient().NewListBySubscriptionPager(nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.LocalRulestackResourceListResult = armpanngfw.LocalRulestackResourceListResult{ + // Value: []*armpanngfw.LocalRulestackResource{ + // { + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/lrs1"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_ListByResourceGroup_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_NewListByResourceGroupPager_localRulestacksListByResourceGroupMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewLocalRulestacksClient().NewListByResourceGroupPager("rgopenapi", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.LocalRulestackResourceListResult = armpanngfw.LocalRulestackResourceListResult{ + // Value: []*armpanngfw.LocalRulestackResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // TenantID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.RulestackProperties{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // AssociatedSubscriptions: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa")}, + // DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + // MinAppIDVersion: to.Ptr("aaaaaaaaaaaaaaaaaaaaa"), + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanLocation: to.Ptr("eastus"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // Scope: to.Ptr(armpanngfw.ScopeTypeLOCAL), + // SecurityServices: &armpanngfw.SecurityServices{ + // AntiSpywareProfile: to.Ptr("aaaaaaaaaa"), + // 
AntiVirusProfile: to.Ptr("aaaaaaaaaaaaaaaaaaaaaa"), + // DNSSubscription: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // FileBlockingProfile: to.Ptr("aaaaa"), + // OutboundTrustCertificate: to.Ptr("aaaaaa"), + // OutboundUnTrustCertificate: to.Ptr("aaaaaaaa"), + // URLFilteringProfile: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // VulnerabilityProfile: to.Ptr("aaaaaaaaaa"), + // }, + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_ListByResourceGroup_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_NewListByResourceGroupPager_localRulestacksListByResourceGroupMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewLocalRulestacksClient().NewListByResourceGroupPager("rgopenapi", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.LocalRulestackResourceListResult = armpanngfw.LocalRulestackResourceListResult{ + // Value: []*armpanngfw.LocalRulestackResource{ + // { + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/lrs1"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_Get_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_Get_localRulestacksGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().Get(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.LocalRulestackResource = armpanngfw.LocalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("localRulestacks"), + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/localrulestacks/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // TenantID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.RulestackProperties{ + // Description: to.Ptr("local rulestacks"), + // AssociatedSubscriptions: []*string{ + // to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + // DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + // MinAppIDVersion: to.Ptr("8.5.3"), + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanLocation: to.Ptr("eastus"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // Scope: to.Ptr(armpanngfw.ScopeTypeLOCAL), + // SecurityServices: &armpanngfw.SecurityServices{ + // AntiSpywareProfile: 
to.Ptr("default"), + // AntiVirusProfile: to.Ptr("default"), + // DNSSubscription: to.Ptr("default"), + // FileBlockingProfile: to.Ptr("default"), + // OutboundTrustCertificate: to.Ptr("default"), + // OutboundUnTrustCertificate: to.Ptr("default"), + // URLFilteringProfile: to.Ptr("default"), + // VulnerabilityProfile: to.Ptr("default"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_Get_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_Get_localRulestacksGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().Get(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.LocalRulestackResource = armpanngfw.LocalRulestackResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_CreateOrUpdate_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_BeginCreateOrUpdate_localRulestacksCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewLocalRulestacksClient().BeginCreateOrUpdate(ctx, "rgopenapi", "lrs1", armpanngfw.LocalRulestackResource{ + Location: to.Ptr("eastus"), + Tags: map[string]*string{ + "tagName": to.Ptr("value"), + }, + Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + "key16": { + ClientID: to.Ptr("aaaa"), + PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + }, + }, + }, + Properties: &armpanngfw.RulestackProperties{ + Description: to.Ptr("local rulestacks"), + AssociatedSubscriptions: []*string{ + to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + MinAppIDVersion: to.Ptr("8.5.3"), + PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + PanLocation: to.Ptr("eastus"), + ProvisioningState: 
to.Ptr(armpanngfw.ProvisioningStateAccepted), + Scope: to.Ptr(armpanngfw.ScopeTypeLOCAL), + SecurityServices: &armpanngfw.SecurityServices{ + AntiSpywareProfile: to.Ptr("default"), + AntiVirusProfile: to.Ptr("default"), + DNSSubscription: to.Ptr("default"), + FileBlockingProfile: to.Ptr("default"), + OutboundTrustCertificate: to.Ptr("default"), + OutboundUnTrustCertificate: to.Ptr("default"), + URLFilteringProfile: to.Ptr("default"), + VulnerabilityProfile: to.Ptr("default"), + }, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.LocalRulestackResource = armpanngfw.LocalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("localRulestacks"), + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/localrulestacks/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: 
to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // TenantID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.RulestackProperties{ + // Description: to.Ptr("local rulestacks"), + // AssociatedSubscriptions: []*string{ + // to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + // DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + // MinAppIDVersion: to.Ptr("8.5.3"), + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanLocation: to.Ptr("eastus"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // Scope: to.Ptr(armpanngfw.ScopeTypeLOCAL), + // SecurityServices: &armpanngfw.SecurityServices{ + // AntiSpywareProfile: to.Ptr("default"), + // AntiVirusProfile: to.Ptr("default"), + // DNSSubscription: to.Ptr("default"), + // FileBlockingProfile: to.Ptr("default"), + // OutboundTrustCertificate: to.Ptr("default"), + // OutboundUnTrustCertificate: to.Ptr("default"), + // URLFilteringProfile: to.Ptr("default"), + // VulnerabilityProfile: to.Ptr("default"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_CreateOrUpdate_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_BeginCreateOrUpdate_localRulestacksCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := 
armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewLocalRulestacksClient().BeginCreateOrUpdate(ctx, "rgopenapi", "lrs1", armpanngfw.LocalRulestackResource{ + Location: to.Ptr("eastus"), + Properties: &armpanngfw.RulestackProperties{}, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.LocalRulestackResource = armpanngfw.LocalRulestackResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval"), + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_Update_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_Update_localRulestacksUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().Update(ctx, "rgopenapi", "lrs1", armpanngfw.LocalRulestackResourceUpdate{ + Identity: 
&armpanngfw.AzureResourceManagerManagedIdentityProperties{ + Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + "key16": { + ClientID: to.Ptr("aaaa"), + PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + }, + }, + }, + Properties: &armpanngfw.LocalRulestackResourceUpdateProperties{ + Description: to.Ptr("local rulestacks"), + AssociatedSubscriptions: []*string{ + to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + MinAppIDVersion: to.Ptr("8.5.3"), + PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + PanLocation: to.Ptr("eastus"), + Scope: to.Ptr(armpanngfw.ScopeTypeLOCAL), + SecurityServices: &armpanngfw.SecurityServices{ + AntiSpywareProfile: to.Ptr("default"), + AntiVirusProfile: to.Ptr("default"), + DNSSubscription: to.Ptr("default"), + FileBlockingProfile: to.Ptr("default"), + OutboundTrustCertificate: to.Ptr("default"), + OutboundUnTrustCertificate: to.Ptr("default"), + URLFilteringProfile: to.Ptr("default"), + VulnerabilityProfile: to.Ptr("default"), + }, + }, + Tags: map[string]*string{ + "tagName": to.Ptr("value"), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.LocalRulestackResource = armpanngfw.LocalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("localRulestacks"), + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourceGroups/firewall-rg/providers/PaloAltoNetworks.Cloudngfw/localrulestacks/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Location: to.Ptr("eastus"), + // Tags: map[string]*string{ + // "tagName": to.Ptr("value"), + // }, + // Identity: &armpanngfw.AzureResourceManagerManagedIdentityProperties{ + // Type: to.Ptr(armpanngfw.ManagedIdentityTypeNone), + // PrincipalID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // TenantID: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // UserAssignedIdentities: map[string]*armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // "key16": &armpanngfw.AzureResourceManagerUserAssignedIdentity{ + // ClientID: to.Ptr("aaaa"), + // PrincipalID: to.Ptr("aaaaaaaaaaaaaaa"), + // }, + // }, + // }, + // Properties: &armpanngfw.RulestackProperties{ + // Description: to.Ptr("local rulestacks"), + // AssociatedSubscriptions: []*string{ + // to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27")}, + // DefaultMode: to.Ptr(armpanngfw.DefaultModeIPS), + // MinAppIDVersion: to.Ptr("8.5.3"), + // PanEtag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c12"), + // PanLocation: to.Ptr("eastus"), + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // Scope: to.Ptr(armpanngfw.ScopeTypeLOCAL), + // SecurityServices: &armpanngfw.SecurityServices{ + // AntiSpywareProfile: 
to.Ptr("default"), + // AntiVirusProfile: to.Ptr("default"), + // DNSSubscription: to.Ptr("default"), + // FileBlockingProfile: to.Ptr("default"), + // OutboundTrustCertificate: to.Ptr("default"), + // OutboundUnTrustCertificate: to.Ptr("default"), + // URLFilteringProfile: to.Ptr("default"), + // VulnerabilityProfile: to.Ptr("default"), + // }, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_Update_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_Update_localRulestacksUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().Update(ctx, "rgopenapi", "lrs1", armpanngfw.LocalRulestackResourceUpdate{}, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.LocalRulestackResource = armpanngfw.LocalRulestackResource{ + // Location: to.Ptr("eastus"), + // Properties: &armpanngfw.RulestackProperties{ + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_Delete_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_BeginDelete_localRulestacksDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewLocalRulestacksClient().BeginDelete(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_Delete_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_BeginDelete_localRulestacksDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewLocalRulestacksClient().BeginDelete(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", 
err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_commit_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_BeginCommit_localRulestacksCommitMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewLocalRulestacksClient().BeginCommit(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_commit_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_BeginCommit_localRulestacksCommitMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewLocalRulestacksClient().BeginCommit(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = 
poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_getChangeLog_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_GetChangeLog_localRulestacksGetChangeLogMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().GetChangeLog(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.Changelog = armpanngfw.Changelog{ + // Changes: []*string{ + // to.Ptr("aaaa")}, + // LastCommitted: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModified: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_getChangeLog_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_GetChangeLog_localRulestacksGetChangeLogMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().GetChangeLog(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.Changelog = armpanngfw.Changelog{ + // Changes: []*string{ + // to.Ptr("aaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_getSupportInfo_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_GetSupportInfo_localRulestacksGetSupportInfoMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().GetSupportInfo(ctx, "rgopenapi", "lrs1", &armpanngfw.LocalRulestacksClientGetSupportInfoOptions{Email: to.Ptr("user1@domain.com")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.SupportInfo = armpanngfw.SupportInfo{ + // AccountID: to.Ptr("3cg5b439-294d-4c25-b0b2-ef649e0g6d38"), + // AccountRegistered: to.Ptr(armpanngfw.BooleanEnumTRUE), + // FreeTrial: to.Ptr(armpanngfw.BooleanEnumTRUE), + // FreeTrialCreditLeft: to.Ptr[int32](10), + // FreeTrialDaysLeft: to.Ptr[int32](1), + // HelpURL: to.Ptr("https://ssopreview.paloaltonetworks.com/home/bookmark/0oa4ao61shG4rd3Ub1d7/2557"), + // ProductSerial: to.Ptr("e22715cb-7e4e-4814-ad4f-ccd1417755d7"), + // ProductSKU: to.Ptr("62f63e3c-bc5a-4d68-a8a1-fcba9f526c90"), + // RegisterURL: to.Ptr("https://ssopreview.paloaltonetworks.com/home/bookmark/0oa4ao61shG4rd3Ub1d7/2557"), + // SupportURL: to.Ptr("https://ssopreview.paloaltonetworks.com/home/bookmark/0oa4ao61shG4rd3Ub1d7/2557"), + // UserDomainSupported: to.Ptr(armpanngfw.BooleanEnumTRUE), + // UserRegistered: to.Ptr(armpanngfw.BooleanEnumTRUE), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_getSupportInfo_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_GetSupportInfo_localRulestacksGetSupportInfoMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().GetSupportInfo(ctx, "rgopenapi", "lrs1", &armpanngfw.LocalRulestacksClientGetSupportInfoOptions{Email: nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. 
+ _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.SupportInfo = armpanngfw.SupportInfo{ + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listAdvancedSecurityObjects_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_ListAdvancedSecurityObjects_localRulestacksListAdvancedSecurityObjectsMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListAdvancedSecurityObjects(ctx, "rgopenapi", "lrs1", armpanngfw.AdvSecurityObjectTypeEnum("localRulestacks"), &armpanngfw.LocalRulestacksClientListAdvancedSecurityObjectsOptions{Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.AdvSecurityObjectListResponse = armpanngfw.AdvSecurityObjectListResponse{ + // Value: &armpanngfw.AdvSecurityObjectModel{ + // Type: to.Ptr("localRulestacks"), + // Entry: []*armpanngfw.NameDescriptionObject{ + // { + // Name: to.Ptr("aaaaaaaaaa"), + // Description: to.Ptr("aaaaaaaaaaaa"), + // }}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listAdvancedSecurityObjects_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_ListAdvancedSecurityObjects_localRulestacksListAdvancedSecurityObjectsMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListAdvancedSecurityObjects(ctx, "rgopenapi", "lrs1", armpanngfw.AdvSecurityObjectTypeEnum("localRulestacks"), &armpanngfw.LocalRulestacksClientListAdvancedSecurityObjectsOptions{Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.AdvSecurityObjectListResponse = armpanngfw.AdvSecurityObjectListResponse{ + // Value: &armpanngfw.AdvSecurityObjectModel{ + // Entry: []*armpanngfw.NameDescriptionObject{ + // { + // Name: to.Ptr("aaaaaaaaaa"), + // }}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listAppIds_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_ListAppIDs_localRulestacksListAppIdsMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListAppIDs(ctx, "rgopenapi", "lrs1", &armpanngfw.LocalRulestacksClientListAppIDsOptions{AppIDVersion: to.Ptr("8543"), + AppPrefix: to.Ptr("pref"), + Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ListAppIDResponse = armpanngfw.ListAppIDResponse{ + // Value: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listAppIds_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_ListAppIDs_localRulestacksListAppIdsMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListAppIDs(ctx, "rgopenapi", "lrs1", &armpanngfw.LocalRulestacksClientListAppIDsOptions{AppIDVersion: nil, + AppPrefix: nil, + Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ListAppIDResponse = armpanngfw.ListAppIDResponse{ + // Value: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listCountries_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_ListCountries_localRulestacksListCountriesMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListCountries(ctx, "rgopenapi", "lrs1", &armpanngfw.LocalRulestacksClientListCountriesOptions{Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.CountriesResponse = armpanngfw.CountriesResponse{ + // Value: []*armpanngfw.Country{ + // { + // Description: to.Ptr("aaaaa"), + // Code: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listCountries_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_ListCountries_localRulestacksListCountriesMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListCountries(ctx, "rgopenapi", "lrs1", &armpanngfw.LocalRulestacksClientListCountriesOptions{Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.CountriesResponse = armpanngfw.CountriesResponse{ + // Value: []*armpanngfw.Country{ + // { + // Code: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listFirewalls_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_ListFirewalls_localRulestacksListFirewallsMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListFirewalls(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ListFirewallsResponse = armpanngfw.ListFirewallsResponse{ + // Value: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listFirewalls_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_ListFirewalls_localRulestacksListFirewallsMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListFirewalls(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.ListFirewallsResponse = armpanngfw.ListFirewallsResponse{ + // Value: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa")}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listPredefinedUrlCategories_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_ListPredefinedURLCategories_localRulestacksListPredefinedUrlCategoriesMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListPredefinedURLCategories(ctx, "rgopenapi", "lrs1", &armpanngfw.LocalRulestacksClientListPredefinedURLCategoriesOptions{Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PredefinedURLCategoriesResponse = armpanngfw.PredefinedURLCategoriesResponse{ + // Value: []*armpanngfw.PredefinedURLCategory{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // Action: to.Ptr("aaaaaaa"), + // }}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listPredefinedUrlCategories_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_ListPredefinedURLCategories_localRulestacksListPredefinedUrlCategoriesMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListPredefinedURLCategories(ctx, "rgopenapi", "lrs1", &armpanngfw.LocalRulestacksClientListPredefinedURLCategoriesOptions{Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PredefinedURLCategoriesResponse = armpanngfw.PredefinedURLCategoriesResponse{ + // Value: []*armpanngfw.PredefinedURLCategory{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // Action: to.Ptr("aaaaaaa"), + // }}, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listSecurityServices_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_ListSecurityServices_localRulestacksListSecurityServicesMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListSecurityServices(ctx, "rgopenapi", "lrs1", armpanngfw.SecurityServicesTypeEnum("localRulestacks"), &armpanngfw.LocalRulestacksClientListSecurityServicesOptions{Skip: to.Ptr("a6a321"), + Top: to.Ptr[int32](20), + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.SecurityServicesResponse = armpanngfw.SecurityServicesResponse{ + // Value: &armpanngfw.SecurityServicesTypeList{ + // Type: to.Ptr("localRulestacks"), + // Entry: []*armpanngfw.NameDescriptionObject{ + // { + // Name: to.Ptr("aaaaaaaaaa"), + // Description: to.Ptr("aaaaaaaaaaaa"), + // }}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_listSecurityServices_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_ListSecurityServices_localRulestacksListSecurityServicesMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewLocalRulestacksClient().ListSecurityServices(ctx, "rgopenapi", "lrs1", armpanngfw.SecurityServicesTypeEnum("localRulestacks"), &armpanngfw.LocalRulestacksClientListSecurityServicesOptions{Skip: nil, + Top: nil, + }) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.SecurityServicesResponse = armpanngfw.SecurityServicesResponse{ + // Value: &armpanngfw.SecurityServicesTypeList{ + // Entry: []*armpanngfw.NameDescriptionObject{ + // { + // Name: to.Ptr("aaaaaaaaaa"), + // }}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_revert_MaximumSet_Gen.json +func ExampleLocalRulestacksClient_Revert_localRulestacksRevertMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewLocalRulestacksClient().Revert(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/LocalRulestacks_revert_MinimumSet_Gen.json +func ExampleLocalRulestacksClient_Revert_localRulestacksRevertMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewLocalRulestacksClient().Revert(ctx, "rgopenapi", "lrs1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} diff --git 
a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/models.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/models.go new file mode 100644 index 000000000000..d786e70d9847 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/models.go @@ -0,0 +1,1924 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import "time" + +// AdvSecurityObjectListResponse - advanced security object +type AdvSecurityObjectListResponse struct { + // REQUIRED; response value + Value *AdvSecurityObjectModel + + // next link + NextLink *string +} + +// AdvSecurityObjectModel - List of custom and predefined url category +type AdvSecurityObjectModel struct { + // REQUIRED; URL entry + Entry []*NameDescriptionObject + + // type of object + Type *string +} + +// AppSeenData - Data Type for App Seen +type AppSeenData struct { + // REQUIRED; array of appSeen + AppSeenList []*AppSeenInfo + + // REQUIRED; number of rows + Count *int32 +} + +// AppSeenInfo - Definition for App Seen +type AppSeenInfo struct { + // REQUIRED; category + Category *string + + // REQUIRED; risk + Risk *string + + // REQUIRED; standardPorts + StandardPorts *string + + // REQUIRED; subCategory + SubCategory *string + + // REQUIRED; tag + Tag *string + + // REQUIRED; technology + Technology *string + + // REQUIRED; title + Title *string +} + +// ApplicationInsights - Application Insights key +type ApplicationInsights struct { + // Resource id for Application Insights + ID *string + + // Application Insights key + Key *string +} + +// AzureResourceManagerManagedIdentityProperties - The properties of the managed service identities assigned to this resource. 
+type AzureResourceManagerManagedIdentityProperties struct { + // REQUIRED; The type of managed identity assigned to this resource. + Type *ManagedIdentityType + + // The identities assigned to this resource by the user. + UserAssignedIdentities map[string]*AzureResourceManagerUserAssignedIdentity + + // READ-ONLY; The active directory identifier of this principal. + PrincipalID *string + + // READ-ONLY; The Active Directory tenant id of the principal. + TenantID *string +} + +// AzureResourceManagerUserAssignedIdentity - A managed identity assigned by the user. +type AzureResourceManagerUserAssignedIdentity struct { + // The active directory client identifier for this principal. + ClientID *string + + // The active directory identifier for this principal. + PrincipalID *string +} + +// Category - URL/EDL to match +type Category struct { + // REQUIRED; feed list + Feeds []*string + + // REQUIRED; custom URL + URLCustom []*string +} + +// CertificateObject - certificate used for inbound and outbound decryption +type CertificateObject struct { + // REQUIRED; use certificate self signed + CertificateSelfSigned *BooleanEnum + + // comment for this object + AuditComment *string + + // Resource Id of certificate signer, to be populated only when certificateSelfSigned is false + CertificateSignerResourceID *string + + // user description for this object + Description *string + + // read only string representing last create or update + Etag *string + + // READ-ONLY; Provisioning state of the resource. + ProvisioningState *ProvisioningState +} + +// CertificateObjectGlobalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the CertificateObjectGlobalRulestackClient.BeginCreateOrUpdate +// method. +type CertificateObjectGlobalRulestackClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. 
+ ResumeToken string +} + +// CertificateObjectGlobalRulestackClientBeginDeleteOptions contains the optional parameters for the CertificateObjectGlobalRulestackClient.BeginDelete +// method. +type CertificateObjectGlobalRulestackClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CertificateObjectGlobalRulestackClientGetOptions contains the optional parameters for the CertificateObjectGlobalRulestackClient.Get +// method. +type CertificateObjectGlobalRulestackClientGetOptions struct { + // placeholder for future optional parameters +} + +// CertificateObjectGlobalRulestackClientListOptions contains the optional parameters for the CertificateObjectGlobalRulestackClient.NewListPager +// method. +type CertificateObjectGlobalRulestackClientListOptions struct { + // placeholder for future optional parameters +} + +// CertificateObjectGlobalRulestackResource - GlobalRulestack Certificate Object +type CertificateObjectGlobalRulestackResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *CertificateObject + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// CertificateObjectGlobalRulestackResourceListResult - The response of a CertificateObjectGlobalRulestackResource list operation. 
+type CertificateObjectGlobalRulestackResourceListResult struct { + // REQUIRED; The items on this page + Value []*CertificateObjectGlobalRulestackResource + + // The link to the next page of items + NextLink *string +} + +// CertificateObjectLocalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the CertificateObjectLocalRulestackClient.BeginCreateOrUpdate +// method. +type CertificateObjectLocalRulestackClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CertificateObjectLocalRulestackClientBeginDeleteOptions contains the optional parameters for the CertificateObjectLocalRulestackClient.BeginDelete +// method. +type CertificateObjectLocalRulestackClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// CertificateObjectLocalRulestackClientGetOptions contains the optional parameters for the CertificateObjectLocalRulestackClient.Get +// method. +type CertificateObjectLocalRulestackClientGetOptions struct { + // placeholder for future optional parameters +} + +// CertificateObjectLocalRulestackClientListByLocalRulestacksOptions contains the optional parameters for the CertificateObjectLocalRulestackClient.NewListByLocalRulestacksPager +// method. +type CertificateObjectLocalRulestackClientListByLocalRulestacksOptions struct { + // placeholder for future optional parameters +} + +// CertificateObjectLocalRulestackResource - LocalRulestack Certificate Object +type CertificateObjectLocalRulestackResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *CertificateObject + + // READ-ONLY; Fully qualified resource ID for the resource. 
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// CertificateObjectLocalRulestackResourceListResult - The response of a CertificateObjectLocalRulestackResource list operation. +type CertificateObjectLocalRulestackResourceListResult struct { + // REQUIRED; The items on this page + Value []*CertificateObjectLocalRulestackResource + + // The link to the next page of items + NextLink *string +} + +// Changelog list +type Changelog struct { + // REQUIRED; list of changes + Changes []*string + + // lastCommitted timestamp + LastCommitted *time.Time + + // lastModified timestamp + LastModified *time.Time +} + +// CountriesResponse - Countries Response Object +type CountriesResponse struct { + // REQUIRED; List of countries + Value []*Country + + // next link + NextLink *string +} + +// Country Description +type Country struct { + // REQUIRED; country code + Code *string + + // code description + Description *string +} + +// DNSSettings - DNS Proxy settings for Firewall +type DNSSettings struct { + // List of IPs associated with the Firewall + DNSServers []*IPAddress + + // Enable DNS proxy, disabled by default + EnableDNSProxy *DNSProxy + + // Enabled DNS proxy type, disabled by default + EnabledDNSType *EnabledDNSType +} + +// DestinationAddr - destination address +type DestinationAddr struct { + // special value 'any' + Cidrs []*string + + // list of countries + Countries []*string + + // list of feeds + Feeds []*string + + // fqdn list + FqdnLists []*string + + // prefix list + PrefixLists []*string +} + +// EndpointConfiguration - Endpoint 
Configuration for frontend and backend +type EndpointConfiguration struct { + // REQUIRED; Address Space + Address *IPAddress + + // REQUIRED; port ID + Port *string +} + +// EventHub configurations +type EventHub struct { + // Resource ID of EventHub + ID *string + + // EventHub name + Name *string + + // EventHub namespace + NameSpace *string + + // EventHub policy name + PolicyName *string + + // Subscription Id + SubscriptionID *string +} + +// FirewallDeploymentProperties - Properties specific to the Firewall resource deployment. +type FirewallDeploymentProperties struct { + // REQUIRED; DNS settings for Firewall + DNSSettings *DNSSettings + + // REQUIRED; Marketplace details + MarketplaceDetails *MarketplaceDetails + + // REQUIRED; Network settings + NetworkProfile *NetworkProfile + + // REQUIRED; Billing plan information. + PlanData *PlanData + + // Associated Rulestack + AssociatedRulestack *RulestackDetails + + // Frontend settings for Firewall + FrontEndSettings []*FrontendSetting + + // Panorama Managed: Default is False. Default will be CloudSec managed + IsPanoramaManaged *BooleanEnum + + // panEtag info + PanEtag *string + + // Panorama Configuration + PanoramaConfig *PanoramaConfig + + // READ-ONLY; Provisioning state of the resource. + ProvisioningState *ProvisioningState +} + +// FirewallResource - PaloAltoNetworks Firewall +type FirewallResource struct { + // REQUIRED; The geo-location where the resource lives + Location *string + + // REQUIRED; The resource-specific properties for this resource. + Properties *FirewallDeploymentProperties + + // The managed service identities assigned to this resource. + Identity *AzureResourceManagerManagedIdentityProperties + + // Resource tags. + Tags map[string]*string + + // READ-ONLY; Fully qualified resource ID for the resource. 
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// FirewallResourceListResult - The response of a FirewallResource list operation. +type FirewallResourceListResult struct { + // REQUIRED; The items on this page + Value []*FirewallResource + + // The link to the next page of items + NextLink *string +} + +// FirewallResourceUpdate - The type used for update operations of the FirewallResource. +type FirewallResourceUpdate struct { + // The managed service identities assigned to this resource. + Identity *AzureResourceManagerManagedIdentityProperties + + // The updatable properties of the FirewallResource. + Properties *FirewallResourceUpdateProperties + + // Resource tags. + Tags map[string]*string +} + +// FirewallResourceUpdateProperties - The updatable properties of the FirewallResource. +type FirewallResourceUpdateProperties struct { + // Associated Rulestack + AssociatedRulestack *RulestackDetails + + // DNS settings for Firewall + DNSSettings *DNSSettings + + // Frontend settings for Firewall + FrontEndSettings []*FrontendSetting + + // Panorama Managed: Default is False. Default will be CloudSec managed + IsPanoramaManaged *BooleanEnum + + // Marketplace details + MarketplaceDetails *MarketplaceDetails + + // Network settings + NetworkProfile *NetworkProfile + + // panEtag info + PanEtag *string + + // Panorama Configuration + PanoramaConfig *PanoramaConfig + + // Billing plan information. + PlanData *PlanData +} + +// FirewallStatusClientGetOptions contains the optional parameters for the FirewallStatusClient.Get method. 
+type FirewallStatusClientGetOptions struct { + // placeholder for future optional parameters +} + +// FirewallStatusClientListByFirewallsOptions contains the optional parameters for the FirewallStatusClient.NewListByFirewallsPager +// method. +type FirewallStatusClientListByFirewallsOptions struct { + // placeholder for future optional parameters +} + +// FirewallStatusProperty - Firewall Status +type FirewallStatusProperty struct { + // READ-ONLY; Detail description of current health of the Firewall + HealthReason *string + + // READ-ONLY; Current status of the Firewall + HealthStatus *HealthStatus + + // READ-ONLY; Panorama Managed: Default is False. Default will be CloudSec managed + IsPanoramaManaged *BooleanEnum + + // READ-ONLY; Panorama Status + PanoramaStatus *PanoramaStatus + + // READ-ONLY; Provisioning state of the resource. + ProvisioningState *ReadOnlyProvisioningState +} + +// FirewallStatusResource - Firewall Status +type FirewallStatusResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *FirewallStatusProperty + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// FirewallStatusResourceListResult - The response of a FirewallStatusResource list operation. 
+type FirewallStatusResourceListResult struct { + // REQUIRED; The items on this page + Value []*FirewallStatusResource + + // The link to the next page of items + NextLink *string +} + +// FirewallsClientBeginCreateOrUpdateOptions contains the optional parameters for the FirewallsClient.BeginCreateOrUpdate +// method. +type FirewallsClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// FirewallsClientBeginDeleteOptions contains the optional parameters for the FirewallsClient.BeginDelete method. +type FirewallsClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// FirewallsClientGetGlobalRulestackOptions contains the optional parameters for the FirewallsClient.GetGlobalRulestack method. +type FirewallsClientGetGlobalRulestackOptions struct { + // placeholder for future optional parameters +} + +// FirewallsClientGetLogProfileOptions contains the optional parameters for the FirewallsClient.GetLogProfile method. +type FirewallsClientGetLogProfileOptions struct { + // placeholder for future optional parameters +} + +// FirewallsClientGetOptions contains the optional parameters for the FirewallsClient.Get method. +type FirewallsClientGetOptions struct { + // placeholder for future optional parameters +} + +// FirewallsClientGetSupportInfoOptions contains the optional parameters for the FirewallsClient.GetSupportInfo method. +type FirewallsClientGetSupportInfoOptions struct { + // email address on behalf of which this API called + Email *string +} + +// FirewallsClientListByResourceGroupOptions contains the optional parameters for the FirewallsClient.NewListByResourceGroupPager +// method. +type FirewallsClientListByResourceGroupOptions struct { + // placeholder for future optional parameters +} + +// FirewallsClientListBySubscriptionOptions contains the optional parameters for the FirewallsClient.NewListBySubscriptionPager +// method. 
+type FirewallsClientListBySubscriptionOptions struct { + // placeholder for future optional parameters +} + +// FirewallsClientSaveLogProfileOptions contains the optional parameters for the FirewallsClient.SaveLogProfile method. +type FirewallsClientSaveLogProfileOptions struct { + LogSettings *LogSettings +} + +// FirewallsClientUpdateOptions contains the optional parameters for the FirewallsClient.Update method. +type FirewallsClientUpdateOptions struct { + // placeholder for future optional parameters +} + +// FqdnListGlobalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the FqdnListGlobalRulestackClient.BeginCreateOrUpdate +// method. +type FqdnListGlobalRulestackClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// FqdnListGlobalRulestackClientBeginDeleteOptions contains the optional parameters for the FqdnListGlobalRulestackClient.BeginDelete +// method. +type FqdnListGlobalRulestackClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// FqdnListGlobalRulestackClientGetOptions contains the optional parameters for the FqdnListGlobalRulestackClient.Get method. +type FqdnListGlobalRulestackClientGetOptions struct { + // placeholder for future optional parameters +} + +// FqdnListGlobalRulestackClientListOptions contains the optional parameters for the FqdnListGlobalRulestackClient.NewListPager +// method. +type FqdnListGlobalRulestackClientListOptions struct { + // placeholder for future optional parameters +} + +// FqdnListGlobalRulestackResource - GlobalRulestack fqdnList +type FqdnListGlobalRulestackResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *FqdnObject + + // READ-ONLY; Fully qualified resource ID for the resource. 
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// FqdnListGlobalRulestackResourceListResult - The response of a FqdnListGlobalRulestackResource list operation. +type FqdnListGlobalRulestackResourceListResult struct { + // REQUIRED; The items on this page + Value []*FqdnListGlobalRulestackResource + + // The link to the next page of items + NextLink *string +} + +// FqdnListLocalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the FqdnListLocalRulestackClient.BeginCreateOrUpdate +// method. +type FqdnListLocalRulestackClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// FqdnListLocalRulestackClientBeginDeleteOptions contains the optional parameters for the FqdnListLocalRulestackClient.BeginDelete +// method. +type FqdnListLocalRulestackClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// FqdnListLocalRulestackClientGetOptions contains the optional parameters for the FqdnListLocalRulestackClient.Get method. +type FqdnListLocalRulestackClientGetOptions struct { + // placeholder for future optional parameters +} + +// FqdnListLocalRulestackClientListByLocalRulestacksOptions contains the optional parameters for the FqdnListLocalRulestackClient.NewListByLocalRulestacksPager +// method. 
+type FqdnListLocalRulestackClientListByLocalRulestacksOptions struct { + // placeholder for future optional parameters +} + +// FqdnListLocalRulestackResource - LocalRulestack fqdnList +type FqdnListLocalRulestackResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *FqdnObject + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// FqdnListLocalRulestackResourceListResult - The response of a FqdnListLocalRulestackResource list operation. +type FqdnListLocalRulestackResourceListResult struct { + // REQUIRED; The items on this page + Value []*FqdnListLocalRulestackResource + + // The link to the next page of items + NextLink *string +} + +// FqdnObject - fqdn object +type FqdnObject struct { + // REQUIRED; fqdn list + FqdnList []*string + + // comment for this object + AuditComment *string + + // fqdn object description + Description *string + + // etag info + Etag *string + + // READ-ONLY; Provisioning state of the resource. 
+ ProvisioningState *ProvisioningState +} + +// FrontendSetting - Frontend setting for Firewall +type FrontendSetting struct { + // REQUIRED; Backend configurations + BackendConfiguration *EndpointConfiguration + + // REQUIRED; Frontend configurations + FrontendConfiguration *EndpointConfiguration + + // REQUIRED; Settings name + Name *string + + // REQUIRED; Protocol Type + Protocol *ProtocolType +} + +// GlobalRulestackClientBeginCommitOptions contains the optional parameters for the GlobalRulestackClient.BeginCommit method. +type GlobalRulestackClientBeginCommitOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GlobalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the GlobalRulestackClient.BeginCreateOrUpdate +// method. +type GlobalRulestackClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GlobalRulestackClientBeginDeleteOptions contains the optional parameters for the GlobalRulestackClient.BeginDelete method. +type GlobalRulestackClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// GlobalRulestackClientGetChangeLogOptions contains the optional parameters for the GlobalRulestackClient.GetChangeLog method. +type GlobalRulestackClientGetChangeLogOptions struct { + // placeholder for future optional parameters +} + +// GlobalRulestackClientGetOptions contains the optional parameters for the GlobalRulestackClient.Get method. +type GlobalRulestackClientGetOptions struct { + // placeholder for future optional parameters +} + +// GlobalRulestackClientListAdvancedSecurityObjectsOptions contains the optional parameters for the GlobalRulestackClient.ListAdvancedSecurityObjects +// method. 
+type GlobalRulestackClientListAdvancedSecurityObjectsOptions struct { + Skip *string + Top *int32 +} + +// GlobalRulestackClientListAppIDsOptions contains the optional parameters for the GlobalRulestackClient.ListAppIDs method. +type GlobalRulestackClientListAppIDsOptions struct { + AppIDVersion *string + AppPrefix *string + Skip *string + Top *int32 +} + +// GlobalRulestackClientListCountriesOptions contains the optional parameters for the GlobalRulestackClient.ListCountries +// method. +type GlobalRulestackClientListCountriesOptions struct { + Skip *string + Top *int32 +} + +// GlobalRulestackClientListFirewallsOptions contains the optional parameters for the GlobalRulestackClient.ListFirewalls +// method. +type GlobalRulestackClientListFirewallsOptions struct { + // placeholder for future optional parameters +} + +// GlobalRulestackClientListOptions contains the optional parameters for the GlobalRulestackClient.NewListPager method. +type GlobalRulestackClientListOptions struct { + // placeholder for future optional parameters +} + +// GlobalRulestackClientListPredefinedURLCategoriesOptions contains the optional parameters for the GlobalRulestackClient.ListPredefinedURLCategories +// method. +type GlobalRulestackClientListPredefinedURLCategoriesOptions struct { + Skip *string + Top *int32 +} + +// GlobalRulestackClientListSecurityServicesOptions contains the optional parameters for the GlobalRulestackClient.ListSecurityServices +// method. +type GlobalRulestackClientListSecurityServicesOptions struct { + Skip *string + Top *int32 +} + +// GlobalRulestackClientRevertOptions contains the optional parameters for the GlobalRulestackClient.Revert method. +type GlobalRulestackClientRevertOptions struct { + // placeholder for future optional parameters +} + +// GlobalRulestackClientUpdateOptions contains the optional parameters for the GlobalRulestackClient.Update method. 
+type GlobalRulestackClientUpdateOptions struct { + // placeholder for future optional parameters +} + +// GlobalRulestackInfo - PAN Rulestack Describe Object +type GlobalRulestackInfo struct { + // REQUIRED; rulestack description + AzureID *string +} + +// GlobalRulestackResource - PaloAltoNetworks GlobalRulestack +type GlobalRulestackResource struct { + // REQUIRED; Global Location + Location *string + + // REQUIRED; The resource-specific properties for this resource. + Properties *RulestackProperties + + // The managed service identities assigned to this resource. + Identity *AzureResourceManagerManagedIdentityProperties + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// GlobalRulestackResourceListResult - The response of a GlobalRulestackResource list operation. +type GlobalRulestackResourceListResult struct { + // REQUIRED; The items on this page + Value []*GlobalRulestackResource + + // The link to the next page of items + NextLink *string +} + +// GlobalRulestackResourceUpdate - The type used for update operations of the GlobalRulestackResource. +type GlobalRulestackResourceUpdate struct { + // The managed service identities assigned to this resource. + Identity *AzureResourceManagerManagedIdentityProperties + + // Global Location + Location *string + + // The updatable properties of the GlobalRulestackResource. + Properties *GlobalRulestackResourceUpdateProperties +} + +// GlobalRulestackResourceUpdateProperties - The updatable properties of the GlobalRulestackResource. 
+type GlobalRulestackResourceUpdateProperties struct { + // subscription scope of global rulestack + AssociatedSubscriptions []*string + + // Mode for default rules creation + DefaultMode *DefaultMode + + // rulestack description + Description *string + + // minimum version + MinAppIDVersion *string + + // PanEtag info + PanEtag *string + + // Rulestack Location, Required for GlobalRulestacks, Not for LocalRulestacks + PanLocation *string + + // Rulestack Type + Scope *ScopeType + + // Security Profile + SecurityServices *SecurityServices +} + +// IPAddress - IP Address +type IPAddress struct { + // Address value + Address *string + + // Resource Id + ResourceID *string +} + +// IPAddressSpace - IP Address Space +type IPAddressSpace struct { + // Address Space + AddressSpace *string + + // Resource Id + ResourceID *string +} + +type ListAppIDResponse struct { + // REQUIRED; List of AppIds + Value []*string + + // next Link + NextLink *string +} + +// ListFirewallsResponse - List firewalls response +type ListFirewallsResponse struct { + // REQUIRED; firewalls list + Value []*string + + // next link + NextLink *string +} + +// LocalRulesClientBeginCreateOrUpdateOptions contains the optional parameters for the LocalRulesClient.BeginCreateOrUpdate +// method. +type LocalRulesClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// LocalRulesClientBeginDeleteOptions contains the optional parameters for the LocalRulesClient.BeginDelete method. +type LocalRulesClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// LocalRulesClientGetCountersOptions contains the optional parameters for the LocalRulesClient.GetCounters method. +type LocalRulesClientGetCountersOptions struct { + FirewallName *string +} + +// LocalRulesClientGetOptions contains the optional parameters for the LocalRulesClient.Get method. 
+type LocalRulesClientGetOptions struct { + // placeholder for future optional parameters +} + +// LocalRulesClientListByLocalRulestacksOptions contains the optional parameters for the LocalRulesClient.NewListByLocalRulestacksPager +// method. +type LocalRulesClientListByLocalRulestacksOptions struct { + // placeholder for future optional parameters +} + +// LocalRulesClientRefreshCountersOptions contains the optional parameters for the LocalRulesClient.RefreshCounters method. +type LocalRulesClientRefreshCountersOptions struct { + FirewallName *string +} + +// LocalRulesClientResetCountersOptions contains the optional parameters for the LocalRulesClient.ResetCounters method. +type LocalRulesClientResetCountersOptions struct { + FirewallName *string +} + +// LocalRulesResource - LocalRulestack rule list +type LocalRulesResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *RuleEntry + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// LocalRulesResourceListResult - The response of a LocalRulesResource list operation. +type LocalRulesResourceListResult struct { + // REQUIRED; The items on this page + Value []*LocalRulesResource + + // The link to the next page of items + NextLink *string +} + +// LocalRulestackResource - PaloAltoNetworks LocalRulestack +type LocalRulestackResource struct { + // REQUIRED; The geo-location where the resource lives + Location *string + + // REQUIRED; The resource-specific properties for this resource. 
+ Properties *RulestackProperties + + // The managed service identities assigned to this resource. + Identity *AzureResourceManagerManagedIdentityProperties + + // Resource tags. + Tags map[string]*string + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// LocalRulestackResourceListResult - The response of a LocalRulestackResource list operation. +type LocalRulestackResourceListResult struct { + // REQUIRED; The items on this page + Value []*LocalRulestackResource + + // The link to the next page of items + NextLink *string +} + +// LocalRulestackResourceUpdate - The type used for update operations of the LocalRulestackResource. +type LocalRulestackResourceUpdate struct { + // The managed service identities assigned to this resource. + Identity *AzureResourceManagerManagedIdentityProperties + + // The updatable properties of the LocalRulestackResource. + Properties *LocalRulestackResourceUpdateProperties + + // Resource tags. + Tags map[string]*string +} + +// LocalRulestackResourceUpdateProperties - The updatable properties of the LocalRulestackResource. 
+type LocalRulestackResourceUpdateProperties struct { + // subscription scope of global rulestack + AssociatedSubscriptions []*string + + // Mode for default rules creation + DefaultMode *DefaultMode + + // rulestack description + Description *string + + // minimum version + MinAppIDVersion *string + + // PanEtag info + PanEtag *string + + // Rulestack Location, Required for GlobalRulestacks, Not for LocalRulestacks + PanLocation *string + + // Rulestack Type + Scope *ScopeType + + // Security Profile + SecurityServices *SecurityServices +} + +// LocalRulestacksClientBeginCommitOptions contains the optional parameters for the LocalRulestacksClient.BeginCommit method. +type LocalRulestacksClientBeginCommitOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// LocalRulestacksClientBeginCreateOrUpdateOptions contains the optional parameters for the LocalRulestacksClient.BeginCreateOrUpdate +// method. +type LocalRulestacksClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// LocalRulestacksClientBeginDeleteOptions contains the optional parameters for the LocalRulestacksClient.BeginDelete method. +type LocalRulestacksClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// LocalRulestacksClientGetChangeLogOptions contains the optional parameters for the LocalRulestacksClient.GetChangeLog method. +type LocalRulestacksClientGetChangeLogOptions struct { + // placeholder for future optional parameters +} + +// LocalRulestacksClientGetOptions contains the optional parameters for the LocalRulestacksClient.Get method. +type LocalRulestacksClientGetOptions struct { + // placeholder for future optional parameters +} + +// LocalRulestacksClientGetSupportInfoOptions contains the optional parameters for the LocalRulestacksClient.GetSupportInfo +// method. 
+type LocalRulestacksClientGetSupportInfoOptions struct { + // email address on behalf of which this API called + Email *string +} + +// LocalRulestacksClientListAdvancedSecurityObjectsOptions contains the optional parameters for the LocalRulestacksClient.ListAdvancedSecurityObjects +// method. +type LocalRulestacksClientListAdvancedSecurityObjectsOptions struct { + Skip *string + Top *int32 +} + +// LocalRulestacksClientListAppIDsOptions contains the optional parameters for the LocalRulestacksClient.ListAppIDs method. +type LocalRulestacksClientListAppIDsOptions struct { + AppIDVersion *string + AppPrefix *string + Skip *string + Top *int32 +} + +// LocalRulestacksClientListByResourceGroupOptions contains the optional parameters for the LocalRulestacksClient.NewListByResourceGroupPager +// method. +type LocalRulestacksClientListByResourceGroupOptions struct { + // placeholder for future optional parameters +} + +// LocalRulestacksClientListBySubscriptionOptions contains the optional parameters for the LocalRulestacksClient.NewListBySubscriptionPager +// method. +type LocalRulestacksClientListBySubscriptionOptions struct { + // placeholder for future optional parameters +} + +// LocalRulestacksClientListCountriesOptions contains the optional parameters for the LocalRulestacksClient.ListCountries +// method. +type LocalRulestacksClientListCountriesOptions struct { + Skip *string + Top *int32 +} + +// LocalRulestacksClientListFirewallsOptions contains the optional parameters for the LocalRulestacksClient.ListFirewalls +// method. +type LocalRulestacksClientListFirewallsOptions struct { + // placeholder for future optional parameters +} + +// LocalRulestacksClientListPredefinedURLCategoriesOptions contains the optional parameters for the LocalRulestacksClient.ListPredefinedURLCategories +// method. 
+type LocalRulestacksClientListPredefinedURLCategoriesOptions struct { + Skip *string + Top *int32 +} + +// LocalRulestacksClientListSecurityServicesOptions contains the optional parameters for the LocalRulestacksClient.ListSecurityServices +// method. +type LocalRulestacksClientListSecurityServicesOptions struct { + Skip *string + Top *int32 +} + +// LocalRulestacksClientRevertOptions contains the optional parameters for the LocalRulestacksClient.Revert method. +type LocalRulestacksClientRevertOptions struct { + // placeholder for future optional parameters +} + +// LocalRulestacksClientUpdateOptions contains the optional parameters for the LocalRulestacksClient.Update method. +type LocalRulestacksClientUpdateOptions struct { + // placeholder for future optional parameters +} + +// LogDestination - Log Destination +type LogDestination struct { + // Event Hub configurations + EventHubConfigurations *EventHub + + // Monitor Log configurations + MonitorConfigurations *MonitorLog + + // Storage account configurations + StorageConfigurations *StorageAccount +} + +// LogSettings - Log Settings for Firewall +type LogSettings struct { + // Application Insight details + ApplicationInsights *ApplicationInsights + + // Common destination configurations + CommonDestination *LogDestination + + // Decrypt destination configurations + DecryptLogDestination *LogDestination + + // Log option SAME/INDIVIDUAL + LogOption *LogOption + + // One of possible log type + LogType *LogType + + // Threat destination configurations + ThreatLogDestination *LogDestination + + // Traffic destination configurations + TrafficLogDestination *LogDestination +} + +// MarketplaceDetails of PAN Firewall resource +type MarketplaceDetails struct { + // REQUIRED; Offer Id + OfferID *string + + // REQUIRED; Publisher Id + PublisherID *string + + // Marketplace Subscription Status + MarketplaceSubscriptionStatus *MarketplaceSubscriptionStatus + + // READ-ONLY; Marketplace Subscription Id + 
MarketplaceSubscriptionID *string +} + +// MonitorLog configurations +type MonitorLog struct { + // Resource ID of MonitorLog + ID *string + + // Primary Key value for Monitor + PrimaryKey *string + + // Secondary Key value for Monitor + SecondaryKey *string + + // Subscription Id + SubscriptionID *string + + // MonitorLog workspace + Workspace *string +} + +// NameDescriptionObject - object type info +type NameDescriptionObject struct { + // REQUIRED; name value + Name *string + + // description value + Description *string +} + +// NetworkProfile - Network settings for Firewall +type NetworkProfile struct { + // REQUIRED; Enable egress NAT, enabled by default + EnableEgressNat *EgressNat + + // REQUIRED; vnet or vwan, cannot be updated + NetworkType *NetworkType + + // REQUIRED; List of IPs associated with the Firewall + PublicIPs []*IPAddress + + // Egress nat IP to use + EgressNatIP []*IPAddress + + // Vnet configurations + VnetConfiguration *VnetConfiguration + + // Vwan configurations + VwanConfiguration *VwanConfiguration +} + +// Operation - Details of a REST API operation, returned from the Resource Provider Operations API +type Operation struct { + // Localized display information for this particular operation. + Display *OperationDisplay + + // READ-ONLY; Enum. Indicates the action type. "Internal" refers to actions that are for internal only APIs. + ActionType *ActionType + + // READ-ONLY; Whether the operation applies to data-plane. This is "true" for data-plane operations and "false" for ARM/control-plane + // operations. + IsDataAction *bool + + // READ-ONLY; The name of the operation, as per Resource-Based Access Control (RBAC). Examples: "Microsoft.Compute/virtualMachines/write", + // "Microsoft.Compute/virtualMachines/capture/action" + Name *string + + // READ-ONLY; The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit logs UX. 
Default + // value is "user,system" + Origin *Origin +} + +// OperationDisplay - Localized display information for this particular operation. +type OperationDisplay struct { + // READ-ONLY; The short, localized friendly description of the operation; suitable for tool tips and detailed views. + Description *string + + // READ-ONLY; The concise, localized friendly name for the operation; suitable for dropdowns. E.g. "Create or Update Virtual + // Machine", "Restart Virtual Machine". + Operation *string + + // READ-ONLY; The localized friendly form of the resource provider name, e.g. "Microsoft Monitoring Insights" or "Microsoft + // Compute". + Provider *string + + // READ-ONLY; The localized friendly name of the resource type related to this operation. E.g. "Virtual Machines" or "Job + // Schedule Collections". + Resource *string +} + +// OperationListResult - A list of REST API operations supported by an Azure Resource Provider. It contains an URL link to +// get the next set of results. +type OperationListResult struct { + // READ-ONLY; URL to get the next set of operation list results (if there are any). + NextLink *string + + // READ-ONLY; List of operations supported by the resource provider + Value []*Operation +} + +// OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. +type OperationsClientListOptions struct { + // placeholder for future optional parameters +} + +// PanoramaConfig - Panorama Config +type PanoramaConfig struct { + // REQUIRED; Base64 encoded string representing Panorama parameters to be used by Firewall to connect to Panorama. 
This string + // is generated via azure plugin in Panorama + ConfigString *string + + // READ-ONLY; Panorama Collector Group to join - (Once configured we can not edit the value) + CgName *string + + // READ-ONLY; Panorama Device Group to join + DgName *string + + // READ-ONLY; Resource name(may be unique) for PN admin + HostName *string + + // READ-ONLY; Primary Panorama Server IP address value in dotted format for IPv4 + PanoramaServer *string + + // READ-ONLY; Secondary Panorama Server IP address value in dotted format for IPv4 + PanoramaServer2 *string + + // READ-ONLY; Panorama Template Stack to join - (Once configured we can not edit the value) + TplName *string + + // READ-ONLY; VM auth key for panorama connectivity + VMAuthKey *string +} + +// PanoramaStatus - Panorama connectivity information +type PanoramaStatus struct { + // READ-ONLY; Secondary Panorama connection status + PanoramaServer2Status *ServerStatus + + // READ-ONLY; Primary Panorama connection status + PanoramaServerStatus *ServerStatus +} + +// PlanData - Billing plan information. +type PlanData struct { + // REQUIRED; different billing cycles like MONTHLY/WEEKLY + BillingCycle *BillingCycle + + // REQUIRED; plan id as published by Liftr.PAN + PlanID *string + + // different usage type like PAYG/COMMITTED + UsageType *UsageType + + // READ-ONLY; date when plan was applied + EffectiveDate *time.Time +} + +// PostRulesClientBeginCreateOrUpdateOptions contains the optional parameters for the PostRulesClient.BeginCreateOrUpdate +// method. +type PostRulesClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// PostRulesClientBeginDeleteOptions contains the optional parameters for the PostRulesClient.BeginDelete method. +type PostRulesClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. 
+ ResumeToken string +} + +// PostRulesClientGetCountersOptions contains the optional parameters for the PostRulesClient.GetCounters method. +type PostRulesClientGetCountersOptions struct { + FirewallName *string +} + +// PostRulesClientGetOptions contains the optional parameters for the PostRulesClient.Get method. +type PostRulesClientGetOptions struct { + // placeholder for future optional parameters +} + +// PostRulesClientListOptions contains the optional parameters for the PostRulesClient.NewListPager method. +type PostRulesClientListOptions struct { + // placeholder for future optional parameters +} + +// PostRulesClientRefreshCountersOptions contains the optional parameters for the PostRulesClient.RefreshCounters method. +type PostRulesClientRefreshCountersOptions struct { + FirewallName *string +} + +// PostRulesClientResetCountersOptions contains the optional parameters for the PostRulesClient.ResetCounters method. +type PostRulesClientResetCountersOptions struct { + FirewallName *string +} + +// PostRulesResource - PostRulestack rule list +type PostRulesResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *RuleEntry + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// PostRulesResourceListResult - The response of a PostRulesResource list operation. 
+type PostRulesResourceListResult struct { + // REQUIRED; The items on this page + Value []*PostRulesResource + + // The link to the next page of items + NextLink *string +} + +// PreRulesClientBeginCreateOrUpdateOptions contains the optional parameters for the PreRulesClient.BeginCreateOrUpdate method. +type PreRulesClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// PreRulesClientBeginDeleteOptions contains the optional parameters for the PreRulesClient.BeginDelete method. +type PreRulesClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// PreRulesClientGetCountersOptions contains the optional parameters for the PreRulesClient.GetCounters method. +type PreRulesClientGetCountersOptions struct { + FirewallName *string +} + +// PreRulesClientGetOptions contains the optional parameters for the PreRulesClient.Get method. +type PreRulesClientGetOptions struct { + // placeholder for future optional parameters +} + +// PreRulesClientListOptions contains the optional parameters for the PreRulesClient.NewListPager method. +type PreRulesClientListOptions struct { + // placeholder for future optional parameters +} + +// PreRulesClientRefreshCountersOptions contains the optional parameters for the PreRulesClient.RefreshCounters method. +type PreRulesClientRefreshCountersOptions struct { + FirewallName *string +} + +// PreRulesClientResetCountersOptions contains the optional parameters for the PreRulesClient.ResetCounters method. +type PreRulesClientResetCountersOptions struct { + FirewallName *string +} + +// PreRulesResource - PreRulestack rule list +type PreRulesResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *RuleEntry + + // READ-ONLY; Fully qualified resource ID for the resource. 
Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// PreRulesResourceListResult - The response of a PreRulesResource list operation. +type PreRulesResourceListResult struct { + // REQUIRED; The items on this page + Value []*PreRulesResource + + // The link to the next page of items + NextLink *string +} + +// PredefinedURLCategoriesResponse - predefined url categories response +type PredefinedURLCategoriesResponse struct { + // REQUIRED; predefined url categories + Value []*PredefinedURLCategory + + // next link + NextLink *string +} + +// PredefinedURLCategory - Predefined URL category object +type PredefinedURLCategory struct { + // REQUIRED + Action *string + + // REQUIRED + Name *string +} + +// PrefixListGlobalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the PrefixListGlobalRulestackClient.BeginCreateOrUpdate +// method. +type PrefixListGlobalRulestackClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// PrefixListGlobalRulestackClientBeginDeleteOptions contains the optional parameters for the PrefixListGlobalRulestackClient.BeginDelete +// method. +type PrefixListGlobalRulestackClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// PrefixListGlobalRulestackClientGetOptions contains the optional parameters for the PrefixListGlobalRulestackClient.Get +// method. 
+type PrefixListGlobalRulestackClientGetOptions struct { + // placeholder for future optional parameters +} + +// PrefixListGlobalRulestackClientListOptions contains the optional parameters for the PrefixListGlobalRulestackClient.NewListPager +// method. +type PrefixListGlobalRulestackClientListOptions struct { + // placeholder for future optional parameters +} + +// PrefixListGlobalRulestackResource - GlobalRulestack prefixList +type PrefixListGlobalRulestackResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *PrefixObject + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// PrefixListGlobalRulestackResourceListResult - The response of a PrefixListGlobalRulestackResource list operation. +type PrefixListGlobalRulestackResourceListResult struct { + // REQUIRED; The items on this page + Value []*PrefixListGlobalRulestackResource + + // The link to the next page of items + NextLink *string +} + +// PrefixListLocalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the PrefixListLocalRulestackClient.BeginCreateOrUpdate +// method. +type PrefixListLocalRulestackClientBeginCreateOrUpdateOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// PrefixListLocalRulestackClientBeginDeleteOptions contains the optional parameters for the PrefixListLocalRulestackClient.BeginDelete +// method. 
+type PrefixListLocalRulestackClientBeginDeleteOptions struct { + // Resumes the LRO from the provided token. + ResumeToken string +} + +// PrefixListLocalRulestackClientGetOptions contains the optional parameters for the PrefixListLocalRulestackClient.Get method. +type PrefixListLocalRulestackClientGetOptions struct { + // placeholder for future optional parameters +} + +// PrefixListLocalRulestackClientListByLocalRulestacksOptions contains the optional parameters for the PrefixListLocalRulestackClient.NewListByLocalRulestacksPager +// method. +type PrefixListLocalRulestackClientListByLocalRulestacksOptions struct { + // placeholder for future optional parameters +} + +// PrefixListResource - LocalRulestack prefixList +type PrefixListResource struct { + // REQUIRED; The resource-specific properties for this resource. + Properties *PrefixObject + + // READ-ONLY; Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} + ID *string + + // READ-ONLY; The name of the resource + Name *string + + // READ-ONLY; Azure Resource Manager metadata containing createdBy and modifiedBy information. + SystemData *SystemData + + // READ-ONLY; The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" + Type *string +} + +// PrefixListResourceListResult - The response of a PrefixListResource list operation. +type PrefixListResourceListResult struct { + // REQUIRED; The items on this page + Value []*PrefixListResource + + // The link to the next page of items + NextLink *string +} + +// PrefixObject - prefix entry +type PrefixObject struct { + // REQUIRED; prefix list + PrefixList []*string + + // comment for this object + AuditComment *string + + // prefix description + Description *string + + // etag info + Etag *string + + // READ-ONLY; Provisioning state of the resource. 
+ ProvisioningState *ProvisioningState +} + +// RuleCounter - Rule counter +type RuleCounter struct { + // REQUIRED; priority number + Priority *string + + // REQUIRED; rule name + RuleName *string + + // apps seen + AppSeen *AppSeenData + + // firewall name + FirewallName *string + + // hit count + HitCount *int32 + + // last updated timestamp + LastUpdatedTimestamp *time.Time + + // timestamp of request + RequestTimestamp *time.Time + + // rule list name + RuleListName *string + + // rule Stack Name + RuleStackName *string + + // timestamp of response + Timestamp *time.Time +} + +// RuleCounterReset - Rule counter reset +type RuleCounterReset struct { + // firewall name + FirewallName *string + + // rule list name + RuleListName *string + + // rule name + RuleName *string + + // rule Stack Name + RuleStackName *string + + // READ-ONLY; priority number + Priority *string +} + +// RuleEntry - definition of rule +type RuleEntry struct { + // REQUIRED; rule name + RuleName *string + + // rule action + ActionType *ActionEnum + + // array of rule applications + Applications []*string + + // rule comment + AuditComment *string + + // rule category + Category *Category + + // enable or disable decryption + DecryptionRuleType *DecryptionRuleTypeEnum + + // rule description + Description *string + + // destination address + Destination *DestinationAddr + + // enable or disable logging + EnableLogging *StateEnum + + // etag info + Etag *string + + // inbound Inspection Certificate + InboundInspectionCertificate *string + + // cidr should not be 'any' + NegateDestination *BooleanEnum + + // cidr should not be 'any' + NegateSource *BooleanEnum + + // any, application-default, TCP:number, UDP:number + Protocol *string + + // prot port list + ProtocolPortList []*string + + // state of this rule + RuleState *StateEnum + + // source address + Source *SourceAddr + + // tag for rule + Tags []*TagInfo + + // READ-ONLY + Priority *int32 + + // READ-ONLY; Provisioning state of the 
resource. + ProvisioningState *ProvisioningState +} + +// RulestackDetails - Associated rulestack details +type RulestackDetails struct { + // Rulestack location + Location *string + + // Resource Id + ResourceID *string + + // Associated rulestack Id + RulestackID *string +} + +// RulestackProperties - PAN Rulestack Describe Object +type RulestackProperties struct { + // subscription scope of global rulestack + AssociatedSubscriptions []*string + + // Mode for default rules creation + DefaultMode *DefaultMode + + // rulestack description + Description *string + + // minimum version + MinAppIDVersion *string + + // PanEtag info + PanEtag *string + + // Rulestack Location, Required for GlobalRulestacks, Not for LocalRulestacks + PanLocation *string + + // Rulestack Type + Scope *ScopeType + + // Security Profile + SecurityServices *SecurityServices + + // READ-ONLY; Provisioning state of the resource. + ProvisioningState *ProvisioningState +} + +// SecurityServices - security services +type SecurityServices struct { + // Anti spyware Profile data + AntiSpywareProfile *string + + // anti virus profile data + AntiVirusProfile *string + + // DNS Subscription profile data + DNSSubscription *string + + // File blocking profile data + FileBlockingProfile *string + + // Trusted Egress Decryption profile data + OutboundTrustCertificate *string + + // Untrusted Egress Decryption profile data + OutboundUnTrustCertificate *string + + // URL filtering profile data + URLFilteringProfile *string + + // IPs Vulnerability Profile Data + VulnerabilityProfile *string +} + +// SecurityServicesResponse - Security services list response +type SecurityServicesResponse struct { + // REQUIRED; response value + Value *SecurityServicesTypeList + + // next link + NextLink *string +} + +// SecurityServicesTypeList - Security services type list +type SecurityServicesTypeList struct { + // REQUIRED; list + Entry []*NameDescriptionObject + + // security services type + Type *string +} + +// 
SourceAddr - Address properties +type SourceAddr struct { + // special value 'any' + Cidrs []*string + + // list of countries + Countries []*string + + // list of feeds + Feeds []*string + + // prefix list + PrefixLists []*string +} + +// StorageAccount - Storage Account configurations +type StorageAccount struct { + // Storage account name + AccountName *string + + // Resource ID of storage account + ID *string + + // Subscription Id + SubscriptionID *string +} + +// SupportInfo - Support information for the resource +type SupportInfo struct { + // Support account associated with given resource + AccountID *string + + // account registered in Customer Support Portal + AccountRegistered *BooleanEnum + + // Product usage is in free trial period + FreeTrial *BooleanEnum + + // Free trial credit remaining + FreeTrialCreditLeft *int32 + + // Free trial days remaining + FreeTrialDaysLeft *int32 + + // URL for paloaltonetworks live community + HelpURL *string + + // product SKU associated with given resource + ProductSKU *string + + // product Serial associated with given resource + ProductSerial *string + + // URL for registering product in paloaltonetworks Customer Service Portal + RegisterURL *string + + // URL for paloaltonetworks Customer Service Portal + SupportURL *string + + // user domain is supported in Customer Support Portal + UserDomainSupported *BooleanEnum + + // user registered in Customer Support Portal + UserRegistered *BooleanEnum +} + +// SystemData - Metadata pertaining to creation and last modification of the resource. +type SystemData struct { + // The timestamp of resource creation (UTC). + CreatedAt *time.Time + + // The identity that created the resource. + CreatedBy *string + + // The type of identity that created the resource. + CreatedByType *CreatedByType + + // The timestamp of resource last modification (UTC) + LastModifiedAt *time.Time + + // The identity that last modified the resource. 
+ LastModifiedBy *string + + // The type of identity that last modified the resource. + LastModifiedByType *CreatedByType +} + +// TagInfo - Tag +type TagInfo struct { + // REQUIRED; tag name + Key *string + + // REQUIRED; tag value + Value *string +} + +// VnetConfiguration - VnetInfo for Firewall Networking +type VnetConfiguration struct { + // REQUIRED; Trust Subnet + TrustSubnet *IPAddressSpace + + // REQUIRED; Untrust Subnet + UnTrustSubnet *IPAddressSpace + + // REQUIRED; Azure Virtual Network + Vnet *IPAddressSpace + + // IP of trust subnet for UDR + IPOfTrustSubnetForUdr *IPAddress +} + +// VwanConfiguration - VwanInfo for Firewall Networking +type VwanConfiguration struct { + // REQUIRED; vHub Address + VHub *IPAddressSpace + + // IP of trust subnet for UDR + IPOfTrustSubnetForUdr *IPAddress + + // Network Virtual Appliance resource ID + NetworkVirtualApplianceID *string + + // Trust Subnet + TrustSubnet *IPAddressSpace + + // Untrust Subnet + UnTrustSubnet *IPAddressSpace +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/models_serde.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/models_serde.go new file mode 100644 index 000000000000..578fddec7e83 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/models_serde.go @@ -0,0 +1,3530 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" +) + +// MarshalJSON implements the json.Marshaller interface for type AdvSecurityObjectListResponse. 
+func (a AdvSecurityObjectListResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", a.NextLink) + populate(objectMap, "value", a.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AdvSecurityObjectListResponse. +func (a *AdvSecurityObjectListResponse) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &a.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &a.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AdvSecurityObjectModel. +func (a AdvSecurityObjectModel) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "entry", a.Entry) + populate(objectMap, "type", a.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AdvSecurityObjectModel. +func (a *AdvSecurityObjectModel) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "entry": + err = unpopulate(val, "Entry", &a.Entry) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &a.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AppSeenData. 
+func (a AppSeenData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "appSeenList", a.AppSeenList) + populate(objectMap, "count", a.Count) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AppSeenData. +func (a *AppSeenData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "appSeenList": + err = unpopulate(val, "AppSeenList", &a.AppSeenList) + delete(rawMsg, key) + case "count": + err = unpopulate(val, "Count", &a.Count) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AppSeenInfo. +func (a AppSeenInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "category", a.Category) + populate(objectMap, "risk", a.Risk) + populate(objectMap, "standardPorts", a.StandardPorts) + populate(objectMap, "subCategory", a.SubCategory) + populate(objectMap, "tag", a.Tag) + populate(objectMap, "technology", a.Technology) + populate(objectMap, "title", a.Title) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AppSeenInfo. 
+func (a *AppSeenInfo) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "category": + err = unpopulate(val, "Category", &a.Category) + delete(rawMsg, key) + case "risk": + err = unpopulate(val, "Risk", &a.Risk) + delete(rawMsg, key) + case "standardPorts": + err = unpopulate(val, "StandardPorts", &a.StandardPorts) + delete(rawMsg, key) + case "subCategory": + err = unpopulate(val, "SubCategory", &a.SubCategory) + delete(rawMsg, key) + case "tag": + err = unpopulate(val, "Tag", &a.Tag) + delete(rawMsg, key) + case "technology": + err = unpopulate(val, "Technology", &a.Technology) + delete(rawMsg, key) + case "title": + err = unpopulate(val, "Title", &a.Title) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ApplicationInsights. +func (a ApplicationInsights) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", a.ID) + populate(objectMap, "key", a.Key) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ApplicationInsights. 
+func (a *ApplicationInsights) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &a.ID) + delete(rawMsg, key) + case "key": + err = unpopulate(val, "Key", &a.Key) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AzureResourceManagerManagedIdentityProperties. +func (a AzureResourceManagerManagedIdentityProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "principalId", a.PrincipalID) + populate(objectMap, "tenantId", a.TenantID) + populate(objectMap, "type", a.Type) + populate(objectMap, "userAssignedIdentities", a.UserAssignedIdentities) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureResourceManagerManagedIdentityProperties. 
+func (a *AzureResourceManagerManagedIdentityProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "principalId": + err = unpopulate(val, "PrincipalID", &a.PrincipalID) + delete(rawMsg, key) + case "tenantId": + err = unpopulate(val, "TenantID", &a.TenantID) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &a.Type) + delete(rawMsg, key) + case "userAssignedIdentities": + err = unpopulate(val, "UserAssignedIdentities", &a.UserAssignedIdentities) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type AzureResourceManagerUserAssignedIdentity. +func (a AzureResourceManagerUserAssignedIdentity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "clientId", a.ClientID) + populate(objectMap, "principalId", a.PrincipalID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type AzureResourceManagerUserAssignedIdentity. +func (a *AzureResourceManagerUserAssignedIdentity) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "clientId": + err = unpopulate(val, "ClientID", &a.ClientID) + delete(rawMsg, key) + case "principalId": + err = unpopulate(val, "PrincipalID", &a.PrincipalID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", a, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Category. 
+func (c Category) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "feeds", c.Feeds) + populate(objectMap, "urlCustom", c.URLCustom) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Category. +func (c *Category) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "feeds": + err = unpopulate(val, "Feeds", &c.Feeds) + delete(rawMsg, key) + case "urlCustom": + err = unpopulate(val, "URLCustom", &c.URLCustom) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CertificateObject. +func (c CertificateObject) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "auditComment", c.AuditComment) + populate(objectMap, "certificateSelfSigned", c.CertificateSelfSigned) + populate(objectMap, "certificateSignerResourceId", c.CertificateSignerResourceID) + populate(objectMap, "description", c.Description) + populate(objectMap, "etag", c.Etag) + populate(objectMap, "provisioningState", c.ProvisioningState) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CertificateObject. 
+func (c *CertificateObject) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "auditComment": + err = unpopulate(val, "AuditComment", &c.AuditComment) + delete(rawMsg, key) + case "certificateSelfSigned": + err = unpopulate(val, "CertificateSelfSigned", &c.CertificateSelfSigned) + delete(rawMsg, key) + case "certificateSignerResourceId": + err = unpopulate(val, "CertificateSignerResourceID", &c.CertificateSignerResourceID) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &c.Description) + delete(rawMsg, key) + case "etag": + err = unpopulate(val, "Etag", &c.Etag) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &c.ProvisioningState) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CertificateObjectGlobalRulestackResource. +func (c CertificateObjectGlobalRulestackResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", c.ID) + populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "systemData", c.SystemData) + populate(objectMap, "type", c.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CertificateObjectGlobalRulestackResource. 
+func (c *CertificateObjectGlobalRulestackResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &c.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &c.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &c.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &c.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CertificateObjectGlobalRulestackResourceListResult. +func (c CertificateObjectGlobalRulestackResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", c.NextLink) + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CertificateObjectGlobalRulestackResourceListResult. 
+func (c *CertificateObjectGlobalRulestackResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &c.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CertificateObjectLocalRulestackResource. +func (c CertificateObjectLocalRulestackResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", c.ID) + populate(objectMap, "name", c.Name) + populate(objectMap, "properties", c.Properties) + populate(objectMap, "systemData", c.SystemData) + populate(objectMap, "type", c.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CertificateObjectLocalRulestackResource. 
+func (c *CertificateObjectLocalRulestackResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &c.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &c.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &c.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &c.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &c.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CertificateObjectLocalRulestackResourceListResult. +func (c CertificateObjectLocalRulestackResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", c.NextLink) + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CertificateObjectLocalRulestackResourceListResult. +func (c *CertificateObjectLocalRulestackResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &c.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Changelog. 
+func (c Changelog) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "changes", c.Changes) + populateTimeRFC3339(objectMap, "lastCommitted", c.LastCommitted) + populateTimeRFC3339(objectMap, "lastModified", c.LastModified) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Changelog. +func (c *Changelog) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "changes": + err = unpopulate(val, "Changes", &c.Changes) + delete(rawMsg, key) + case "lastCommitted": + err = unpopulateTimeRFC3339(val, "LastCommitted", &c.LastCommitted) + delete(rawMsg, key) + case "lastModified": + err = unpopulateTimeRFC3339(val, "LastModified", &c.LastModified) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type CountriesResponse. +func (c CountriesResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", c.NextLink) + populate(objectMap, "value", c.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type CountriesResponse. 
+func (c *CountriesResponse) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &c.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &c.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Country. +func (c Country) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "code", c.Code) + populate(objectMap, "description", c.Description) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Country. +func (c *Country) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "code": + err = unpopulate(val, "Code", &c.Code) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &c.Description) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", c, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DNSSettings. +func (d DNSSettings) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "dnsServers", d.DNSServers) + populate(objectMap, "enableDnsProxy", d.EnableDNSProxy) + populate(objectMap, "enabledDnsType", d.EnabledDNSType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DNSSettings. 
+func (d *DNSSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "dnsServers": + err = unpopulate(val, "DNSServers", &d.DNSServers) + delete(rawMsg, key) + case "enableDnsProxy": + err = unpopulate(val, "EnableDNSProxy", &d.EnableDNSProxy) + delete(rawMsg, key) + case "enabledDnsType": + err = unpopulate(val, "EnabledDNSType", &d.EnabledDNSType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type DestinationAddr. +func (d DestinationAddr) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "cidrs", d.Cidrs) + populate(objectMap, "countries", d.Countries) + populate(objectMap, "feeds", d.Feeds) + populate(objectMap, "fqdnLists", d.FqdnLists) + populate(objectMap, "prefixLists", d.PrefixLists) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type DestinationAddr. 
+func (d *DestinationAddr) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "cidrs": + err = unpopulate(val, "Cidrs", &d.Cidrs) + delete(rawMsg, key) + case "countries": + err = unpopulate(val, "Countries", &d.Countries) + delete(rawMsg, key) + case "feeds": + err = unpopulate(val, "Feeds", &d.Feeds) + delete(rawMsg, key) + case "fqdnLists": + err = unpopulate(val, "FqdnLists", &d.FqdnLists) + delete(rawMsg, key) + case "prefixLists": + err = unpopulate(val, "PrefixLists", &d.PrefixLists) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", d, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EndpointConfiguration. +func (e EndpointConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "address", e.Address) + populate(objectMap, "port", e.Port) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EndpointConfiguration. +func (e *EndpointConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "address": + err = unpopulate(val, "Address", &e.Address) + delete(rawMsg, key) + case "port": + err = unpopulate(val, "Port", &e.Port) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type EventHub. 
+func (e EventHub) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", e.ID) + populate(objectMap, "name", e.Name) + populate(objectMap, "nameSpace", e.NameSpace) + populate(objectMap, "policyName", e.PolicyName) + populate(objectMap, "subscriptionId", e.SubscriptionID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type EventHub. +func (e *EventHub) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &e.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &e.Name) + delete(rawMsg, key) + case "nameSpace": + err = unpopulate(val, "NameSpace", &e.NameSpace) + delete(rawMsg, key) + case "policyName": + err = unpopulate(val, "PolicyName", &e.PolicyName) + delete(rawMsg, key) + case "subscriptionId": + err = unpopulate(val, "SubscriptionID", &e.SubscriptionID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", e, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FirewallDeploymentProperties. 
+func (f FirewallDeploymentProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "associatedRulestack", f.AssociatedRulestack) + populate(objectMap, "dnsSettings", f.DNSSettings) + populate(objectMap, "frontEndSettings", f.FrontEndSettings) + populate(objectMap, "isPanoramaManaged", f.IsPanoramaManaged) + populate(objectMap, "marketplaceDetails", f.MarketplaceDetails) + populate(objectMap, "networkProfile", f.NetworkProfile) + populate(objectMap, "panEtag", f.PanEtag) + populate(objectMap, "panoramaConfig", f.PanoramaConfig) + populate(objectMap, "planData", f.PlanData) + populate(objectMap, "provisioningState", f.ProvisioningState) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FirewallDeploymentProperties. +func (f *FirewallDeploymentProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "associatedRulestack": + err = unpopulate(val, "AssociatedRulestack", &f.AssociatedRulestack) + delete(rawMsg, key) + case "dnsSettings": + err = unpopulate(val, "DNSSettings", &f.DNSSettings) + delete(rawMsg, key) + case "frontEndSettings": + err = unpopulate(val, "FrontEndSettings", &f.FrontEndSettings) + delete(rawMsg, key) + case "isPanoramaManaged": + err = unpopulate(val, "IsPanoramaManaged", &f.IsPanoramaManaged) + delete(rawMsg, key) + case "marketplaceDetails": + err = unpopulate(val, "MarketplaceDetails", &f.MarketplaceDetails) + delete(rawMsg, key) + case "networkProfile": + err = unpopulate(val, "NetworkProfile", &f.NetworkProfile) + delete(rawMsg, key) + case "panEtag": + err = unpopulate(val, "PanEtag", &f.PanEtag) + delete(rawMsg, key) + case "panoramaConfig": + err = unpopulate(val, "PanoramaConfig", &f.PanoramaConfig) + delete(rawMsg, key) 
+ case "planData": + err = unpopulate(val, "PlanData", &f.PlanData) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &f.ProvisioningState) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FirewallResource. +func (f FirewallResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", f.ID) + populate(objectMap, "identity", f.Identity) + populate(objectMap, "location", f.Location) + populate(objectMap, "name", f.Name) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "systemData", f.SystemData) + populate(objectMap, "tags", f.Tags) + populate(objectMap, "type", f.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FirewallResource. +func (f *FirewallResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &f.ID) + delete(rawMsg, key) + case "identity": + err = unpopulate(val, "Identity", &f.Identity) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &f.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &f.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &f.SystemData) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &f.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &f.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + 
} + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FirewallResourceListResult. +func (f FirewallResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FirewallResourceListResult. +func (f *FirewallResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &f.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FirewallResourceUpdate. +func (f FirewallResourceUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "identity", f.Identity) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "tags", f.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FirewallResourceUpdate. 
+func (f *FirewallResourceUpdate) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "identity": + err = unpopulate(val, "Identity", &f.Identity) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &f.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FirewallResourceUpdateProperties. +func (f FirewallResourceUpdateProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "associatedRulestack", f.AssociatedRulestack) + populate(objectMap, "dnsSettings", f.DNSSettings) + populate(objectMap, "frontEndSettings", f.FrontEndSettings) + populate(objectMap, "isPanoramaManaged", f.IsPanoramaManaged) + populate(objectMap, "marketplaceDetails", f.MarketplaceDetails) + populate(objectMap, "networkProfile", f.NetworkProfile) + populate(objectMap, "panEtag", f.PanEtag) + populate(objectMap, "panoramaConfig", f.PanoramaConfig) + populate(objectMap, "planData", f.PlanData) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FirewallResourceUpdateProperties. 
+func (f *FirewallResourceUpdateProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "associatedRulestack": + err = unpopulate(val, "AssociatedRulestack", &f.AssociatedRulestack) + delete(rawMsg, key) + case "dnsSettings": + err = unpopulate(val, "DNSSettings", &f.DNSSettings) + delete(rawMsg, key) + case "frontEndSettings": + err = unpopulate(val, "FrontEndSettings", &f.FrontEndSettings) + delete(rawMsg, key) + case "isPanoramaManaged": + err = unpopulate(val, "IsPanoramaManaged", &f.IsPanoramaManaged) + delete(rawMsg, key) + case "marketplaceDetails": + err = unpopulate(val, "MarketplaceDetails", &f.MarketplaceDetails) + delete(rawMsg, key) + case "networkProfile": + err = unpopulate(val, "NetworkProfile", &f.NetworkProfile) + delete(rawMsg, key) + case "panEtag": + err = unpopulate(val, "PanEtag", &f.PanEtag) + delete(rawMsg, key) + case "panoramaConfig": + err = unpopulate(val, "PanoramaConfig", &f.PanoramaConfig) + delete(rawMsg, key) + case "planData": + err = unpopulate(val, "PlanData", &f.PlanData) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FirewallStatusProperty. +func (f FirewallStatusProperty) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "healthReason", f.HealthReason) + populate(objectMap, "healthStatus", f.HealthStatus) + populate(objectMap, "isPanoramaManaged", f.IsPanoramaManaged) + populate(objectMap, "panoramaStatus", f.PanoramaStatus) + populate(objectMap, "provisioningState", f.ProvisioningState) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FirewallStatusProperty. 
+func (f *FirewallStatusProperty) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "healthReason": + err = unpopulate(val, "HealthReason", &f.HealthReason) + delete(rawMsg, key) + case "healthStatus": + err = unpopulate(val, "HealthStatus", &f.HealthStatus) + delete(rawMsg, key) + case "isPanoramaManaged": + err = unpopulate(val, "IsPanoramaManaged", &f.IsPanoramaManaged) + delete(rawMsg, key) + case "panoramaStatus": + err = unpopulate(val, "PanoramaStatus", &f.PanoramaStatus) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &f.ProvisioningState) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FirewallStatusResource. +func (f FirewallStatusResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", f.ID) + populate(objectMap, "name", f.Name) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "systemData", f.SystemData) + populate(objectMap, "type", f.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FirewallStatusResource. 
+func (f *FirewallStatusResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &f.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &f.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &f.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &f.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FirewallStatusResourceListResult. +func (f FirewallStatusResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FirewallStatusResourceListResult. +func (f *FirewallStatusResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &f.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FqdnListGlobalRulestackResource. 
+func (f FqdnListGlobalRulestackResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", f.ID) + populate(objectMap, "name", f.Name) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "systemData", f.SystemData) + populate(objectMap, "type", f.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FqdnListGlobalRulestackResource. +func (f *FqdnListGlobalRulestackResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &f.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &f.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &f.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &f.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FqdnListGlobalRulestackResourceListResult. +func (f FqdnListGlobalRulestackResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FqdnListGlobalRulestackResourceListResult. 
+func (f *FqdnListGlobalRulestackResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &f.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FqdnListLocalRulestackResource. +func (f FqdnListLocalRulestackResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", f.ID) + populate(objectMap, "name", f.Name) + populate(objectMap, "properties", f.Properties) + populate(objectMap, "systemData", f.SystemData) + populate(objectMap, "type", f.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FqdnListLocalRulestackResource. 
+func (f *FqdnListLocalRulestackResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &f.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &f.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &f.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &f.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &f.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FqdnListLocalRulestackResourceListResult. +func (f FqdnListLocalRulestackResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", f.NextLink) + populate(objectMap, "value", f.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FqdnListLocalRulestackResourceListResult. +func (f *FqdnListLocalRulestackResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &f.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &f.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FqdnObject. 
+func (f FqdnObject) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "auditComment", f.AuditComment) + populate(objectMap, "description", f.Description) + populate(objectMap, "etag", f.Etag) + populate(objectMap, "fqdnList", f.FqdnList) + populate(objectMap, "provisioningState", f.ProvisioningState) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FqdnObject. +func (f *FqdnObject) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "auditComment": + err = unpopulate(val, "AuditComment", &f.AuditComment) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &f.Description) + delete(rawMsg, key) + case "etag": + err = unpopulate(val, "Etag", &f.Etag) + delete(rawMsg, key) + case "fqdnList": + err = unpopulate(val, "FqdnList", &f.FqdnList) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &f.ProvisioningState) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type FrontendSetting. +func (f FrontendSetting) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "backendConfiguration", f.BackendConfiguration) + populate(objectMap, "frontendConfiguration", f.FrontendConfiguration) + populate(objectMap, "name", f.Name) + populate(objectMap, "protocol", f.Protocol) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type FrontendSetting. 
+func (f *FrontendSetting) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "backendConfiguration": + err = unpopulate(val, "BackendConfiguration", &f.BackendConfiguration) + delete(rawMsg, key) + case "frontendConfiguration": + err = unpopulate(val, "FrontendConfiguration", &f.FrontendConfiguration) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &f.Name) + delete(rawMsg, key) + case "protocol": + err = unpopulate(val, "Protocol", &f.Protocol) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", f, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GlobalRulestackInfo. +func (g GlobalRulestackInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "azureId", g.AzureID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GlobalRulestackInfo. +func (g *GlobalRulestackInfo) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "azureId": + err = unpopulate(val, "AzureID", &g.AzureID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GlobalRulestackResource. 
+func (g GlobalRulestackResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", g.ID) + populate(objectMap, "identity", g.Identity) + populate(objectMap, "location", g.Location) + populate(objectMap, "name", g.Name) + populate(objectMap, "properties", g.Properties) + populate(objectMap, "systemData", g.SystemData) + populate(objectMap, "type", g.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GlobalRulestackResource. +func (g *GlobalRulestackResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &g.ID) + delete(rawMsg, key) + case "identity": + err = unpopulate(val, "Identity", &g.Identity) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &g.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &g.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &g.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &g.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &g.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GlobalRulestackResourceListResult. +func (g GlobalRulestackResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", g.NextLink) + populate(objectMap, "value", g.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GlobalRulestackResourceListResult. 
+func (g *GlobalRulestackResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &g.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &g.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GlobalRulestackResourceUpdate. +func (g GlobalRulestackResourceUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "identity", g.Identity) + populate(objectMap, "location", g.Location) + populate(objectMap, "properties", g.Properties) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GlobalRulestackResourceUpdate. +func (g *GlobalRulestackResourceUpdate) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "identity": + err = unpopulate(val, "Identity", &g.Identity) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &g.Location) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &g.Properties) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type GlobalRulestackResourceUpdateProperties. 
+func (g GlobalRulestackResourceUpdateProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "associatedSubscriptions", g.AssociatedSubscriptions) + populate(objectMap, "defaultMode", g.DefaultMode) + populate(objectMap, "description", g.Description) + populate(objectMap, "minAppIdVersion", g.MinAppIDVersion) + populate(objectMap, "panEtag", g.PanEtag) + populate(objectMap, "panLocation", g.PanLocation) + populate(objectMap, "scope", g.Scope) + populate(objectMap, "securityServices", g.SecurityServices) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type GlobalRulestackResourceUpdateProperties. +func (g *GlobalRulestackResourceUpdateProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "associatedSubscriptions": + err = unpopulate(val, "AssociatedSubscriptions", &g.AssociatedSubscriptions) + delete(rawMsg, key) + case "defaultMode": + err = unpopulate(val, "DefaultMode", &g.DefaultMode) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &g.Description) + delete(rawMsg, key) + case "minAppIdVersion": + err = unpopulate(val, "MinAppIDVersion", &g.MinAppIDVersion) + delete(rawMsg, key) + case "panEtag": + err = unpopulate(val, "PanEtag", &g.PanEtag) + delete(rawMsg, key) + case "panLocation": + err = unpopulate(val, "PanLocation", &g.PanLocation) + delete(rawMsg, key) + case "scope": + err = unpopulate(val, "Scope", &g.Scope) + delete(rawMsg, key) + case "securityServices": + err = unpopulate(val, "SecurityServices", &g.SecurityServices) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", g, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface 
for type IPAddress. +func (i IPAddress) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "address", i.Address) + populate(objectMap, "resourceId", i.ResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type IPAddress. +func (i *IPAddress) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "address": + err = unpopulate(val, "Address", &i.Address) + delete(rawMsg, key) + case "resourceId": + err = unpopulate(val, "ResourceID", &i.ResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type IPAddressSpace. +func (i IPAddressSpace) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "addressSpace", i.AddressSpace) + populate(objectMap, "resourceId", i.ResourceID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type IPAddressSpace. +func (i *IPAddressSpace) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "addressSpace": + err = unpopulate(val, "AddressSpace", &i.AddressSpace) + delete(rawMsg, key) + case "resourceId": + err = unpopulate(val, "ResourceID", &i.ResourceID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", i, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ListAppIDResponse. 
+func (l ListAppIDResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", l.NextLink) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ListAppIDResponse. +func (l *ListAppIDResponse) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &l.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type ListFirewallsResponse. +func (l ListFirewallsResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", l.NextLink) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type ListFirewallsResponse. +func (l *ListFirewallsResponse) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &l.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LocalRulesResource. 
+func (l LocalRulesResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", l.ID) + populate(objectMap, "name", l.Name) + populate(objectMap, "properties", l.Properties) + populate(objectMap, "systemData", l.SystemData) + populate(objectMap, "type", l.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LocalRulesResource. +func (l *LocalRulesResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &l.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &l.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &l.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &l.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &l.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LocalRulesResourceListResult. +func (l LocalRulesResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", l.NextLink) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LocalRulesResourceListResult. 
+func (l *LocalRulesResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &l.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LocalRulestackResource. +func (l LocalRulestackResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", l.ID) + populate(objectMap, "identity", l.Identity) + populate(objectMap, "location", l.Location) + populate(objectMap, "name", l.Name) + populate(objectMap, "properties", l.Properties) + populate(objectMap, "systemData", l.SystemData) + populate(objectMap, "tags", l.Tags) + populate(objectMap, "type", l.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LocalRulestackResource. 
+func (l *LocalRulestackResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &l.ID) + delete(rawMsg, key) + case "identity": + err = unpopulate(val, "Identity", &l.Identity) + delete(rawMsg, key) + case "location": + err = unpopulate(val, "Location", &l.Location) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &l.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &l.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &l.SystemData) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &l.Tags) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &l.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LocalRulestackResourceListResult. +func (l LocalRulestackResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", l.NextLink) + populate(objectMap, "value", l.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LocalRulestackResourceListResult. 
+func (l *LocalRulestackResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &l.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &l.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LocalRulestackResourceUpdate. +func (l LocalRulestackResourceUpdate) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "identity", l.Identity) + populate(objectMap, "properties", l.Properties) + populate(objectMap, "tags", l.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type LocalRulestackResourceUpdate. +func (l *LocalRulestackResourceUpdate) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "identity": + err = unpopulate(val, "Identity", &l.Identity) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &l.Properties) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &l.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type LocalRulestackResourceUpdateProperties. 
func (l LocalRulestackResourceUpdateProperties) MarshalJSON() ([]byte, error) {
	// populate/unpopulate are file-local helpers shared by all generated model
	// serializers in this file; populate records a field under its JSON wire name.
	objectMap := make(map[string]any)
	populate(objectMap, "associatedSubscriptions", l.AssociatedSubscriptions)
	populate(objectMap, "defaultMode", l.DefaultMode)
	populate(objectMap, "description", l.Description)
	populate(objectMap, "minAppIdVersion", l.MinAppIDVersion)
	populate(objectMap, "panEtag", l.PanEtag)
	populate(objectMap, "panLocation", l.PanLocation)
	populate(objectMap, "scope", l.Scope)
	populate(objectMap, "securityServices", l.SecurityServices)
	return json.Marshal(objectMap)
}

// UnmarshalJSON implements the json.Unmarshaller interface for type LocalRulestackResourceUpdateProperties.
func (l *LocalRulestackResourceUpdateProperties) UnmarshalJSON(data []byte) error {
	// Decode into a raw key->message map first, then dispatch each recognized key
	// to its struct field. Consumed keys are deleted from rawMsg; keys with no
	// matching case leave err nil and are silently ignored.
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return fmt.Errorf("unmarshalling type %T: %v", l, err)
	}
	for key, val := range rawMsg {
		var err error
		switch key {
		case "associatedSubscriptions":
			err = unpopulate(val, "AssociatedSubscriptions", &l.AssociatedSubscriptions)
			delete(rawMsg, key)
		case "defaultMode":
			err = unpopulate(val, "DefaultMode", &l.DefaultMode)
			delete(rawMsg, key)
		case "description":
			err = unpopulate(val, "Description", &l.Description)
			delete(rawMsg, key)
		case "minAppIdVersion":
			err = unpopulate(val, "MinAppIDVersion", &l.MinAppIDVersion)
			delete(rawMsg, key)
		case "panEtag":
			err = unpopulate(val, "PanEtag", &l.PanEtag)
			delete(rawMsg, key)
		case "panLocation":
			err = unpopulate(val, "PanLocation", &l.PanLocation)
			delete(rawMsg, key)
		case "scope":
			err = unpopulate(val, "Scope", &l.Scope)
			delete(rawMsg, key)
		case "securityServices":
			err = unpopulate(val, "SecurityServices", &l.SecurityServices)
			delete(rawMsg, key)
		}
		if err != nil {
			return fmt.Errorf("unmarshalling type %T: %v", l, err)
		}
	}
	return nil
}

// MarshalJSON implements the json.Marshaller interface for type LogDestination.
func (l LogDestination) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]any)
	populate(objectMap, "eventHubConfigurations", l.EventHubConfigurations)
	populate(objectMap, "monitorConfigurations", l.MonitorConfigurations)
	populate(objectMap, "storageConfigurations", l.StorageConfigurations)
	return json.Marshal(objectMap)
}

// UnmarshalJSON implements the json.Unmarshaller interface for type LogDestination.
func (l *LogDestination) UnmarshalJSON(data []byte) error {
	var rawMsg map[string]json.RawMessage
	if err := json.Unmarshal(data, &rawMsg); err != nil {
		return fmt.Errorf("unmarshalling type %T: %v", l, err)
	}
	for key, val := range rawMsg {
		var err error
		switch key {
		case "eventHubConfigurations":
			err = unpopulate(val, "EventHubConfigurations", &l.EventHubConfigurations)
			delete(rawMsg, key)
		case "monitorConfigurations":
			err = unpopulate(val, "MonitorConfigurations", &l.MonitorConfigurations)
			delete(rawMsg, key)
		case "storageConfigurations":
			err = unpopulate(val, "StorageConfigurations", &l.StorageConfigurations)
			delete(rawMsg, key)
		}
		if err != nil {
			return fmt.Errorf("unmarshalling type %T: %v", l, err)
		}
	}
	return nil
}

// MarshalJSON implements the json.Marshaller interface for type LogSettings.
func (l LogSettings) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]any)
	populate(objectMap, "applicationInsights", l.ApplicationInsights)
	populate(objectMap, "commonDestination", l.CommonDestination)
	populate(objectMap, "decryptLogDestination", l.DecryptLogDestination)
	populate(objectMap, "logOption", l.LogOption)
	populate(objectMap, "logType", l.LogType)
	populate(objectMap, "threatLogDestination", l.ThreatLogDestination)
	populate(objectMap, "trafficLogDestination", l.TrafficLogDestination)
	return json.Marshal(objectMap)
}

// UnmarshalJSON implements the json.Unmarshaller interface for type LogSettings.
+func (l *LogSettings) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "applicationInsights": + err = unpopulate(val, "ApplicationInsights", &l.ApplicationInsights) + delete(rawMsg, key) + case "commonDestination": + err = unpopulate(val, "CommonDestination", &l.CommonDestination) + delete(rawMsg, key) + case "decryptLogDestination": + err = unpopulate(val, "DecryptLogDestination", &l.DecryptLogDestination) + delete(rawMsg, key) + case "logOption": + err = unpopulate(val, "LogOption", &l.LogOption) + delete(rawMsg, key) + case "logType": + err = unpopulate(val, "LogType", &l.LogType) + delete(rawMsg, key) + case "threatLogDestination": + err = unpopulate(val, "ThreatLogDestination", &l.ThreatLogDestination) + delete(rawMsg, key) + case "trafficLogDestination": + err = unpopulate(val, "TrafficLogDestination", &l.TrafficLogDestination) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", l, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MarketplaceDetails. +func (m MarketplaceDetails) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "marketplaceSubscriptionId", m.MarketplaceSubscriptionID) + populate(objectMap, "marketplaceSubscriptionStatus", m.MarketplaceSubscriptionStatus) + populate(objectMap, "offerId", m.OfferID) + populate(objectMap, "publisherId", m.PublisherID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MarketplaceDetails. 
+func (m *MarketplaceDetails) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "marketplaceSubscriptionId": + err = unpopulate(val, "MarketplaceSubscriptionID", &m.MarketplaceSubscriptionID) + delete(rawMsg, key) + case "marketplaceSubscriptionStatus": + err = unpopulate(val, "MarketplaceSubscriptionStatus", &m.MarketplaceSubscriptionStatus) + delete(rawMsg, key) + case "offerId": + err = unpopulate(val, "OfferID", &m.OfferID) + delete(rawMsg, key) + case "publisherId": + err = unpopulate(val, "PublisherID", &m.PublisherID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type MonitorLog. +func (m MonitorLog) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", m.ID) + populate(objectMap, "primaryKey", m.PrimaryKey) + populate(objectMap, "secondaryKey", m.SecondaryKey) + populate(objectMap, "subscriptionId", m.SubscriptionID) + populate(objectMap, "workspace", m.Workspace) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type MonitorLog. 
+func (m *MonitorLog) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &m.ID) + delete(rawMsg, key) + case "primaryKey": + err = unpopulate(val, "PrimaryKey", &m.PrimaryKey) + delete(rawMsg, key) + case "secondaryKey": + err = unpopulate(val, "SecondaryKey", &m.SecondaryKey) + delete(rawMsg, key) + case "subscriptionId": + err = unpopulate(val, "SubscriptionID", &m.SubscriptionID) + delete(rawMsg, key) + case "workspace": + err = unpopulate(val, "Workspace", &m.Workspace) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", m, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NameDescriptionObject. +func (n NameDescriptionObject) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", n.Description) + populate(objectMap, "name", n.Name) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NameDescriptionObject. +func (n *NameDescriptionObject) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &n.Description) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &n.Name) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type NetworkProfile. 
+func (n NetworkProfile) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "egressNatIp", n.EgressNatIP) + populate(objectMap, "enableEgressNat", n.EnableEgressNat) + populate(objectMap, "networkType", n.NetworkType) + populate(objectMap, "publicIps", n.PublicIPs) + populate(objectMap, "vnetConfiguration", n.VnetConfiguration) + populate(objectMap, "vwanConfiguration", n.VwanConfiguration) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type NetworkProfile. +func (n *NetworkProfile) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "egressNatIp": + err = unpopulate(val, "EgressNatIP", &n.EgressNatIP) + delete(rawMsg, key) + case "enableEgressNat": + err = unpopulate(val, "EnableEgressNat", &n.EnableEgressNat) + delete(rawMsg, key) + case "networkType": + err = unpopulate(val, "NetworkType", &n.NetworkType) + delete(rawMsg, key) + case "publicIps": + err = unpopulate(val, "PublicIPs", &n.PublicIPs) + delete(rawMsg, key) + case "vnetConfiguration": + err = unpopulate(val, "VnetConfiguration", &n.VnetConfiguration) + delete(rawMsg, key) + case "vwanConfiguration": + err = unpopulate(val, "VwanConfiguration", &n.VwanConfiguration) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", n, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type Operation. 
+func (o Operation) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "actionType", o.ActionType) + populate(objectMap, "display", o.Display) + populate(objectMap, "isDataAction", o.IsDataAction) + populate(objectMap, "name", o.Name) + populate(objectMap, "origin", o.Origin) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type Operation. +func (o *Operation) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "actionType": + err = unpopulate(val, "ActionType", &o.ActionType) + delete(rawMsg, key) + case "display": + err = unpopulate(val, "Display", &o.Display) + delete(rawMsg, key) + case "isDataAction": + err = unpopulate(val, "IsDataAction", &o.IsDataAction) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &o.Name) + delete(rawMsg, key) + case "origin": + err = unpopulate(val, "Origin", &o.Origin) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type OperationDisplay. +func (o OperationDisplay) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "description", o.Description) + populate(objectMap, "operation", o.Operation) + populate(objectMap, "provider", o.Provider) + populate(objectMap, "resource", o.Resource) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type OperationDisplay. 
+func (o *OperationDisplay) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "description": + err = unpopulate(val, "Description", &o.Description) + delete(rawMsg, key) + case "operation": + err = unpopulate(val, "Operation", &o.Operation) + delete(rawMsg, key) + case "provider": + err = unpopulate(val, "Provider", &o.Provider) + delete(rawMsg, key) + case "resource": + err = unpopulate(val, "Resource", &o.Resource) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type OperationListResult. +func (o OperationListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", o.NextLink) + populate(objectMap, "value", o.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type OperationListResult. +func (o *OperationListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &o.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &o.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", o, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PanoramaConfig. 
+func (p PanoramaConfig) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "cgName", p.CgName) + populate(objectMap, "configString", p.ConfigString) + populate(objectMap, "dgName", p.DgName) + populate(objectMap, "hostName", p.HostName) + populate(objectMap, "panoramaServer", p.PanoramaServer) + populate(objectMap, "panoramaServer2", p.PanoramaServer2) + populate(objectMap, "tplName", p.TplName) + populate(objectMap, "vmAuthKey", p.VMAuthKey) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PanoramaConfig. +func (p *PanoramaConfig) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "cgName": + err = unpopulate(val, "CgName", &p.CgName) + delete(rawMsg, key) + case "configString": + err = unpopulate(val, "ConfigString", &p.ConfigString) + delete(rawMsg, key) + case "dgName": + err = unpopulate(val, "DgName", &p.DgName) + delete(rawMsg, key) + case "hostName": + err = unpopulate(val, "HostName", &p.HostName) + delete(rawMsg, key) + case "panoramaServer": + err = unpopulate(val, "PanoramaServer", &p.PanoramaServer) + delete(rawMsg, key) + case "panoramaServer2": + err = unpopulate(val, "PanoramaServer2", &p.PanoramaServer2) + delete(rawMsg, key) + case "tplName": + err = unpopulate(val, "TplName", &p.TplName) + delete(rawMsg, key) + case "vmAuthKey": + err = unpopulate(val, "VMAuthKey", &p.VMAuthKey) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PanoramaStatus. 
+func (p PanoramaStatus) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "panoramaServer2Status", p.PanoramaServer2Status) + populate(objectMap, "panoramaServerStatus", p.PanoramaServerStatus) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PanoramaStatus. +func (p *PanoramaStatus) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "panoramaServer2Status": + err = unpopulate(val, "PanoramaServer2Status", &p.PanoramaServer2Status) + delete(rawMsg, key) + case "panoramaServerStatus": + err = unpopulate(val, "PanoramaServerStatus", &p.PanoramaServerStatus) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PlanData. +func (p PlanData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "billingCycle", p.BillingCycle) + populateTimeRFC3339(objectMap, "effectiveDate", p.EffectiveDate) + populate(objectMap, "planId", p.PlanID) + populate(objectMap, "usageType", p.UsageType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PlanData. 
+func (p *PlanData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "billingCycle": + err = unpopulate(val, "BillingCycle", &p.BillingCycle) + delete(rawMsg, key) + case "effectiveDate": + err = unpopulateTimeRFC3339(val, "EffectiveDate", &p.EffectiveDate) + delete(rawMsg, key) + case "planId": + err = unpopulate(val, "PlanID", &p.PlanID) + delete(rawMsg, key) + case "usageType": + err = unpopulate(val, "UsageType", &p.UsageType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PostRulesResource. +func (p PostRulesResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", p.ID) + populate(objectMap, "name", p.Name) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "systemData", p.SystemData) + populate(objectMap, "type", p.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PostRulesResource. 
+func (p *PostRulesResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &p.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &p.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PostRulesResourceListResult. +func (p PostRulesResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", p.NextLink) + populate(objectMap, "value", p.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PostRulesResourceListResult. +func (p *PostRulesResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &p.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &p.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PreRulesResource. 
+func (p PreRulesResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", p.ID) + populate(objectMap, "name", p.Name) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "systemData", p.SystemData) + populate(objectMap, "type", p.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PreRulesResource. +func (p *PreRulesResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &p.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &p.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PreRulesResourceListResult. +func (p PreRulesResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", p.NextLink) + populate(objectMap, "value", p.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PreRulesResourceListResult. 
+func (p *PreRulesResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &p.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &p.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PredefinedURLCategoriesResponse. +func (p PredefinedURLCategoriesResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", p.NextLink) + populate(objectMap, "value", p.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PredefinedURLCategoriesResponse. +func (p *PredefinedURLCategoriesResponse) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &p.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &p.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PredefinedURLCategory. +func (p PredefinedURLCategory) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "action", p.Action) + populate(objectMap, "name", p.Name) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PredefinedURLCategory. 
+func (p *PredefinedURLCategory) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "action": + err = unpopulate(val, "Action", &p.Action) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrefixListGlobalRulestackResource. +func (p PrefixListGlobalRulestackResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", p.ID) + populate(objectMap, "name", p.Name) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "systemData", p.SystemData) + populate(objectMap, "type", p.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrefixListGlobalRulestackResource. 
+func (p *PrefixListGlobalRulestackResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &p.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &p.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrefixListGlobalRulestackResourceListResult. +func (p PrefixListGlobalRulestackResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", p.NextLink) + populate(objectMap, "value", p.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrefixListGlobalRulestackResourceListResult. +func (p *PrefixListGlobalRulestackResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &p.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &p.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrefixListResource. 
+func (p PrefixListResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "id", p.ID) + populate(objectMap, "name", p.Name) + populate(objectMap, "properties", p.Properties) + populate(objectMap, "systemData", p.SystemData) + populate(objectMap, "type", p.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrefixListResource. +func (p *PrefixListResource) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "id": + err = unpopulate(val, "ID", &p.ID) + delete(rawMsg, key) + case "name": + err = unpopulate(val, "Name", &p.Name) + delete(rawMsg, key) + case "properties": + err = unpopulate(val, "Properties", &p.Properties) + delete(rawMsg, key) + case "systemData": + err = unpopulate(val, "SystemData", &p.SystemData) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &p.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrefixListResourceListResult. +func (p PrefixListResourceListResult) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", p.NextLink) + populate(objectMap, "value", p.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrefixListResourceListResult. 
+func (p *PrefixListResourceListResult) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &p.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &p.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type PrefixObject. +func (p PrefixObject) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "auditComment", p.AuditComment) + populate(objectMap, "description", p.Description) + populate(objectMap, "etag", p.Etag) + populate(objectMap, "prefixList", p.PrefixList) + populate(objectMap, "provisioningState", p.ProvisioningState) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type PrefixObject. 
+func (p *PrefixObject) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "auditComment": + err = unpopulate(val, "AuditComment", &p.AuditComment) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &p.Description) + delete(rawMsg, key) + case "etag": + err = unpopulate(val, "Etag", &p.Etag) + delete(rawMsg, key) + case "prefixList": + err = unpopulate(val, "PrefixList", &p.PrefixList) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &p.ProvisioningState) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", p, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RuleCounter. +func (r RuleCounter) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "appSeen", r.AppSeen) + populate(objectMap, "firewallName", r.FirewallName) + populate(objectMap, "hitCount", r.HitCount) + populateTimeRFC3339(objectMap, "lastUpdatedTimestamp", r.LastUpdatedTimestamp) + populate(objectMap, "priority", r.Priority) + populateTimeRFC3339(objectMap, "requestTimestamp", r.RequestTimestamp) + populate(objectMap, "ruleListName", r.RuleListName) + populate(objectMap, "ruleName", r.RuleName) + populate(objectMap, "ruleStackName", r.RuleStackName) + populateTimeRFC3339(objectMap, "timestamp", r.Timestamp) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RuleCounter. 
+func (r *RuleCounter) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "appSeen": + err = unpopulate(val, "AppSeen", &r.AppSeen) + delete(rawMsg, key) + case "firewallName": + err = unpopulate(val, "FirewallName", &r.FirewallName) + delete(rawMsg, key) + case "hitCount": + err = unpopulate(val, "HitCount", &r.HitCount) + delete(rawMsg, key) + case "lastUpdatedTimestamp": + err = unpopulateTimeRFC3339(val, "LastUpdatedTimestamp", &r.LastUpdatedTimestamp) + delete(rawMsg, key) + case "priority": + err = unpopulate(val, "Priority", &r.Priority) + delete(rawMsg, key) + case "requestTimestamp": + err = unpopulateTimeRFC3339(val, "RequestTimestamp", &r.RequestTimestamp) + delete(rawMsg, key) + case "ruleListName": + err = unpopulate(val, "RuleListName", &r.RuleListName) + delete(rawMsg, key) + case "ruleName": + err = unpopulate(val, "RuleName", &r.RuleName) + delete(rawMsg, key) + case "ruleStackName": + err = unpopulate(val, "RuleStackName", &r.RuleStackName) + delete(rawMsg, key) + case "timestamp": + err = unpopulateTimeRFC3339(val, "Timestamp", &r.Timestamp) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RuleCounterReset. +func (r RuleCounterReset) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "firewallName", r.FirewallName) + populate(objectMap, "priority", r.Priority) + populate(objectMap, "ruleListName", r.RuleListName) + populate(objectMap, "ruleName", r.RuleName) + populate(objectMap, "ruleStackName", r.RuleStackName) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RuleCounterReset. 
+func (r *RuleCounterReset) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "firewallName": + err = unpopulate(val, "FirewallName", &r.FirewallName) + delete(rawMsg, key) + case "priority": + err = unpopulate(val, "Priority", &r.Priority) + delete(rawMsg, key) + case "ruleListName": + err = unpopulate(val, "RuleListName", &r.RuleListName) + delete(rawMsg, key) + case "ruleName": + err = unpopulate(val, "RuleName", &r.RuleName) + delete(rawMsg, key) + case "ruleStackName": + err = unpopulate(val, "RuleStackName", &r.RuleStackName) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RuleEntry. +func (r RuleEntry) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "actionType", r.ActionType) + populate(objectMap, "applications", r.Applications) + populate(objectMap, "auditComment", r.AuditComment) + populate(objectMap, "category", r.Category) + populate(objectMap, "decryptionRuleType", r.DecryptionRuleType) + populate(objectMap, "description", r.Description) + populate(objectMap, "destination", r.Destination) + populate(objectMap, "enableLogging", r.EnableLogging) + populate(objectMap, "etag", r.Etag) + populate(objectMap, "inboundInspectionCertificate", r.InboundInspectionCertificate) + populate(objectMap, "negateDestination", r.NegateDestination) + populate(objectMap, "negateSource", r.NegateSource) + populate(objectMap, "priority", r.Priority) + populate(objectMap, "protocol", r.Protocol) + populate(objectMap, "protocolPortList", r.ProtocolPortList) + populate(objectMap, "provisioningState", r.ProvisioningState) + populate(objectMap, "ruleName", r.RuleName) + 
populate(objectMap, "ruleState", r.RuleState) + populate(objectMap, "source", r.Source) + populate(objectMap, "tags", r.Tags) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RuleEntry. +func (r *RuleEntry) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "actionType": + err = unpopulate(val, "ActionType", &r.ActionType) + delete(rawMsg, key) + case "applications": + err = unpopulate(val, "Applications", &r.Applications) + delete(rawMsg, key) + case "auditComment": + err = unpopulate(val, "AuditComment", &r.AuditComment) + delete(rawMsg, key) + case "category": + err = unpopulate(val, "Category", &r.Category) + delete(rawMsg, key) + case "decryptionRuleType": + err = unpopulate(val, "DecryptionRuleType", &r.DecryptionRuleType) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &r.Description) + delete(rawMsg, key) + case "destination": + err = unpopulate(val, "Destination", &r.Destination) + delete(rawMsg, key) + case "enableLogging": + err = unpopulate(val, "EnableLogging", &r.EnableLogging) + delete(rawMsg, key) + case "etag": + err = unpopulate(val, "Etag", &r.Etag) + delete(rawMsg, key) + case "inboundInspectionCertificate": + err = unpopulate(val, "InboundInspectionCertificate", &r.InboundInspectionCertificate) + delete(rawMsg, key) + case "negateDestination": + err = unpopulate(val, "NegateDestination", &r.NegateDestination) + delete(rawMsg, key) + case "negateSource": + err = unpopulate(val, "NegateSource", &r.NegateSource) + delete(rawMsg, key) + case "priority": + err = unpopulate(val, "Priority", &r.Priority) + delete(rawMsg, key) + case "protocol": + err = unpopulate(val, "Protocol", &r.Protocol) + delete(rawMsg, key) + case "protocolPortList": + err = 
unpopulate(val, "ProtocolPortList", &r.ProtocolPortList) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &r.ProvisioningState) + delete(rawMsg, key) + case "ruleName": + err = unpopulate(val, "RuleName", &r.RuleName) + delete(rawMsg, key) + case "ruleState": + err = unpopulate(val, "RuleState", &r.RuleState) + delete(rawMsg, key) + case "source": + err = unpopulate(val, "Source", &r.Source) + delete(rawMsg, key) + case "tags": + err = unpopulate(val, "Tags", &r.Tags) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RulestackDetails. +func (r RulestackDetails) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "location", r.Location) + populate(objectMap, "resourceId", r.ResourceID) + populate(objectMap, "rulestackId", r.RulestackID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RulestackDetails. +func (r *RulestackDetails) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "location": + err = unpopulate(val, "Location", &r.Location) + delete(rawMsg, key) + case "resourceId": + err = unpopulate(val, "ResourceID", &r.ResourceID) + delete(rawMsg, key) + case "rulestackId": + err = unpopulate(val, "RulestackID", &r.RulestackID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type RulestackProperties. 
+func (r RulestackProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "associatedSubscriptions", r.AssociatedSubscriptions) + populate(objectMap, "defaultMode", r.DefaultMode) + populate(objectMap, "description", r.Description) + populate(objectMap, "minAppIdVersion", r.MinAppIDVersion) + populate(objectMap, "panEtag", r.PanEtag) + populate(objectMap, "panLocation", r.PanLocation) + populate(objectMap, "provisioningState", r.ProvisioningState) + populate(objectMap, "scope", r.Scope) + populate(objectMap, "securityServices", r.SecurityServices) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type RulestackProperties. +func (r *RulestackProperties) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", r, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "associatedSubscriptions": + err = unpopulate(val, "AssociatedSubscriptions", &r.AssociatedSubscriptions) + delete(rawMsg, key) + case "defaultMode": + err = unpopulate(val, "DefaultMode", &r.DefaultMode) + delete(rawMsg, key) + case "description": + err = unpopulate(val, "Description", &r.Description) + delete(rawMsg, key) + case "minAppIdVersion": + err = unpopulate(val, "MinAppIDVersion", &r.MinAppIDVersion) + delete(rawMsg, key) + case "panEtag": + err = unpopulate(val, "PanEtag", &r.PanEtag) + delete(rawMsg, key) + case "panLocation": + err = unpopulate(val, "PanLocation", &r.PanLocation) + delete(rawMsg, key) + case "provisioningState": + err = unpopulate(val, "ProvisioningState", &r.ProvisioningState) + delete(rawMsg, key) + case "scope": + err = unpopulate(val, "Scope", &r.Scope) + delete(rawMsg, key) + case "securityServices": + err = unpopulate(val, "SecurityServices", &r.SecurityServices) + delete(rawMsg, key) + } + if err != nil { + return 
fmt.Errorf("unmarshalling type %T: %v", r, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecurityServices. +func (s SecurityServices) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "antiSpywareProfile", s.AntiSpywareProfile) + populate(objectMap, "antiVirusProfile", s.AntiVirusProfile) + populate(objectMap, "dnsSubscription", s.DNSSubscription) + populate(objectMap, "fileBlockingProfile", s.FileBlockingProfile) + populate(objectMap, "outboundTrustCertificate", s.OutboundTrustCertificate) + populate(objectMap, "outboundUnTrustCertificate", s.OutboundUnTrustCertificate) + populate(objectMap, "urlFilteringProfile", s.URLFilteringProfile) + populate(objectMap, "vulnerabilityProfile", s.VulnerabilityProfile) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecurityServices. +func (s *SecurityServices) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "antiSpywareProfile": + err = unpopulate(val, "AntiSpywareProfile", &s.AntiSpywareProfile) + delete(rawMsg, key) + case "antiVirusProfile": + err = unpopulate(val, "AntiVirusProfile", &s.AntiVirusProfile) + delete(rawMsg, key) + case "dnsSubscription": + err = unpopulate(val, "DNSSubscription", &s.DNSSubscription) + delete(rawMsg, key) + case "fileBlockingProfile": + err = unpopulate(val, "FileBlockingProfile", &s.FileBlockingProfile) + delete(rawMsg, key) + case "outboundTrustCertificate": + err = unpopulate(val, "OutboundTrustCertificate", &s.OutboundTrustCertificate) + delete(rawMsg, key) + case "outboundUnTrustCertificate": + err = unpopulate(val, "OutboundUnTrustCertificate", &s.OutboundUnTrustCertificate) + delete(rawMsg, key) + case "urlFilteringProfile": + 
err = unpopulate(val, "URLFilteringProfile", &s.URLFilteringProfile) + delete(rawMsg, key) + case "vulnerabilityProfile": + err = unpopulate(val, "VulnerabilityProfile", &s.VulnerabilityProfile) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecurityServicesResponse. +func (s SecurityServicesResponse) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "nextLink", s.NextLink) + populate(objectMap, "value", s.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecurityServicesResponse. +func (s *SecurityServicesResponse) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "nextLink": + err = unpopulate(val, "NextLink", &s.NextLink) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &s.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SecurityServicesTypeList. +func (s SecurityServicesTypeList) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "entry", s.Entry) + populate(objectMap, "type", s.Type) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SecurityServicesTypeList. 
+func (s *SecurityServicesTypeList) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "entry": + err = unpopulate(val, "Entry", &s.Entry) + delete(rawMsg, key) + case "type": + err = unpopulate(val, "Type", &s.Type) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SourceAddr. +func (s SourceAddr) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "cidrs", s.Cidrs) + populate(objectMap, "countries", s.Countries) + populate(objectMap, "feeds", s.Feeds) + populate(objectMap, "prefixLists", s.PrefixLists) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SourceAddr. +func (s *SourceAddr) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "cidrs": + err = unpopulate(val, "Cidrs", &s.Cidrs) + delete(rawMsg, key) + case "countries": + err = unpopulate(val, "Countries", &s.Countries) + delete(rawMsg, key) + case "feeds": + err = unpopulate(val, "Feeds", &s.Feeds) + delete(rawMsg, key) + case "prefixLists": + err = unpopulate(val, "PrefixLists", &s.PrefixLists) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type StorageAccount. 
+func (s StorageAccount) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "accountName", s.AccountName) + populate(objectMap, "id", s.ID) + populate(objectMap, "subscriptionId", s.SubscriptionID) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageAccount. +func (s *StorageAccount) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "accountName": + err = unpopulate(val, "AccountName", &s.AccountName) + delete(rawMsg, key) + case "id": + err = unpopulate(val, "ID", &s.ID) + delete(rawMsg, key) + case "subscriptionId": + err = unpopulate(val, "SubscriptionID", &s.SubscriptionID) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SupportInfo. +func (s SupportInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "accountId", s.AccountID) + populate(objectMap, "accountRegistered", s.AccountRegistered) + populate(objectMap, "freeTrial", s.FreeTrial) + populate(objectMap, "freeTrialCreditLeft", s.FreeTrialCreditLeft) + populate(objectMap, "freeTrialDaysLeft", s.FreeTrialDaysLeft) + populate(objectMap, "helpURL", s.HelpURL) + populate(objectMap, "productSku", s.ProductSKU) + populate(objectMap, "productSerial", s.ProductSerial) + populate(objectMap, "registerURL", s.RegisterURL) + populate(objectMap, "supportURL", s.SupportURL) + populate(objectMap, "userDomainSupported", s.UserDomainSupported) + populate(objectMap, "userRegistered", s.UserRegistered) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SupportInfo. 
+func (s *SupportInfo) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "accountId": + err = unpopulate(val, "AccountID", &s.AccountID) + delete(rawMsg, key) + case "accountRegistered": + err = unpopulate(val, "AccountRegistered", &s.AccountRegistered) + delete(rawMsg, key) + case "freeTrial": + err = unpopulate(val, "FreeTrial", &s.FreeTrial) + delete(rawMsg, key) + case "freeTrialCreditLeft": + err = unpopulate(val, "FreeTrialCreditLeft", &s.FreeTrialCreditLeft) + delete(rawMsg, key) + case "freeTrialDaysLeft": + err = unpopulate(val, "FreeTrialDaysLeft", &s.FreeTrialDaysLeft) + delete(rawMsg, key) + case "helpURL": + err = unpopulate(val, "HelpURL", &s.HelpURL) + delete(rawMsg, key) + case "productSku": + err = unpopulate(val, "ProductSKU", &s.ProductSKU) + delete(rawMsg, key) + case "productSerial": + err = unpopulate(val, "ProductSerial", &s.ProductSerial) + delete(rawMsg, key) + case "registerURL": + err = unpopulate(val, "RegisterURL", &s.RegisterURL) + delete(rawMsg, key) + case "supportURL": + err = unpopulate(val, "SupportURL", &s.SupportURL) + delete(rawMsg, key) + case "userDomainSupported": + err = unpopulate(val, "UserDomainSupported", &s.UserDomainSupported) + delete(rawMsg, key) + case "userRegistered": + err = unpopulate(val, "UserRegistered", &s.UserRegistered) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type SystemData. 
+func (s SystemData) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populateTimeRFC3339(objectMap, "createdAt", s.CreatedAt) + populate(objectMap, "createdBy", s.CreatedBy) + populate(objectMap, "createdByType", s.CreatedByType) + populateTimeRFC3339(objectMap, "lastModifiedAt", s.LastModifiedAt) + populate(objectMap, "lastModifiedBy", s.LastModifiedBy) + populate(objectMap, "lastModifiedByType", s.LastModifiedByType) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SystemData. +func (s *SystemData) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "createdAt": + err = unpopulateTimeRFC3339(val, "CreatedAt", &s.CreatedAt) + delete(rawMsg, key) + case "createdBy": + err = unpopulate(val, "CreatedBy", &s.CreatedBy) + delete(rawMsg, key) + case "createdByType": + err = unpopulate(val, "CreatedByType", &s.CreatedByType) + delete(rawMsg, key) + case "lastModifiedAt": + err = unpopulateTimeRFC3339(val, "LastModifiedAt", &s.LastModifiedAt) + delete(rawMsg, key) + case "lastModifiedBy": + err = unpopulate(val, "LastModifiedBy", &s.LastModifiedBy) + delete(rawMsg, key) + case "lastModifiedByType": + err = unpopulate(val, "LastModifiedByType", &s.LastModifiedByType) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type TagInfo. +func (t TagInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "key", t.Key) + populate(objectMap, "value", t.Value) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type TagInfo. 
+func (t *TagInfo) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "key": + err = unpopulate(val, "Key", &t.Key) + delete(rawMsg, key) + case "value": + err = unpopulate(val, "Value", &t.Value) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", t, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type VnetConfiguration. +func (v VnetConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "ipOfTrustSubnetForUdr", v.IPOfTrustSubnetForUdr) + populate(objectMap, "trustSubnet", v.TrustSubnet) + populate(objectMap, "unTrustSubnet", v.UnTrustSubnet) + populate(objectMap, "vnet", v.Vnet) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type VnetConfiguration. +func (v *VnetConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "ipOfTrustSubnetForUdr": + err = unpopulate(val, "IPOfTrustSubnetForUdr", &v.IPOfTrustSubnetForUdr) + delete(rawMsg, key) + case "trustSubnet": + err = unpopulate(val, "TrustSubnet", &v.TrustSubnet) + delete(rawMsg, key) + case "unTrustSubnet": + err = unpopulate(val, "UnTrustSubnet", &v.UnTrustSubnet) + delete(rawMsg, key) + case "vnet": + err = unpopulate(val, "Vnet", &v.Vnet) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + } + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type VwanConfiguration. 
+func (v VwanConfiguration) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "ipOfTrustSubnetForUdr", v.IPOfTrustSubnetForUdr) + populate(objectMap, "networkVirtualApplianceId", v.NetworkVirtualApplianceID) + populate(objectMap, "trustSubnet", v.TrustSubnet) + populate(objectMap, "unTrustSubnet", v.UnTrustSubnet) + populate(objectMap, "vHub", v.VHub) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type VwanConfiguration. +func (v *VwanConfiguration) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "ipOfTrustSubnetForUdr": + err = unpopulate(val, "IPOfTrustSubnetForUdr", &v.IPOfTrustSubnetForUdr) + delete(rawMsg, key) + case "networkVirtualApplianceId": + err = unpopulate(val, "NetworkVirtualApplianceID", &v.NetworkVirtualApplianceID) + delete(rawMsg, key) + case "trustSubnet": + err = unpopulate(val, "TrustSubnet", &v.TrustSubnet) + delete(rawMsg, key) + case "unTrustSubnet": + err = unpopulate(val, "UnTrustSubnet", &v.UnTrustSubnet) + delete(rawMsg, key) + case "vHub": + err = unpopulate(val, "VHub", &v.VHub) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", v, err) + } + } + return nil +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/operations_client.go 
b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/operations_client.go new file mode 100644 index 000000000000..78a6eb8a14b1 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/operations_client.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" +) + +// OperationsClient contains the methods for the Operations group. +// Don't use this type directly, use NewOperationsClient() instead. +type OperationsClient struct { + internal *arm.Client +} + +// NewOperationsClient creates a new instance of OperationsClient with the specified values. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewOperationsClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*OperationsClient, error) { + cl, err := arm.NewClient(moduleName+".OperationsClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &OperationsClient{ + internal: cl, + } + return client, nil +} + +// NewListPager - List the operations for the provider +// +// Generated from API version 2022-08-29-preview +// - options - OperationsClientListOptions contains the optional parameters for the OperationsClient.NewListPager method. 
+func (client *OperationsClient) NewListPager(options *OperationsClientListOptions) *runtime.Pager[OperationsClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[OperationsClientListResponse]{ + More: func(page OperationsClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *OperationsClientListResponse) (OperationsClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return OperationsClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return OperationsClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return OperationsClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *OperationsClient) listCreateRequest(ctx context.Context, options *OperationsClientListOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/operations" + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *OperationsClient) listHandleResponse(resp *http.Response) (OperationsClientListResponse, error) { + result := OperationsClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.OperationListResult); err != nil { + return OperationsClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/operations_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/operations_client_example_test.go new file mode 100644 index 000000000000..e2b9dff810bb --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/operations_client_example_test.go @@ -0,0 +1,85 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Operations_List_MaximumSet_Gen.json +func ExampleOperationsClient_NewListPager_operationsListMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewOperationsClient().NewListPager(nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.OperationListResult = armpanngfw.OperationListResult{ + // Value: []*armpanngfw.Operation{ + // { + // Name: to.Ptr("aaa"), + // ActionType: to.Ptr(armpanngfw.ActionTypeInternal), + // Display: &armpanngfw.OperationDisplay{ + // Description: to.Ptr("aaaaaaaa"), + // Operation: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // Provider: to.Ptr("aaaaaaaaaaa"), + // Resource: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // IsDataAction: to.Ptr(true), + // Origin: to.Ptr(armpanngfw.OriginUser), + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/Operations_List_MinimumSet_Gen.json +func ExampleOperationsClient_NewListPager_operationsListMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewOperationsClient().NewListPager(nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.OperationListResult = armpanngfw.OperationListResult{ + // } + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/postrules_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/postrules_client.go new file mode 100644 index 000000000000..4d5021eef05b --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/postrules_client.go @@ -0,0 +1,441 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// PostRulesClient contains the methods for the PostRules group. +// Don't use this type directly, use NewPostRulesClient() instead. +type PostRulesClient struct { + internal *arm.Client +} + +// NewPostRulesClient creates a new instance of PostRulesClient with the specified values. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewPostRulesClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*PostRulesClient, error) { + cl, err := arm.NewClient(moduleName+".PostRulesClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &PostRulesClient{ + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a PostRulesResource +// If the operation fails it returns an *azcore.ResponseError type. 
+// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Post Rule priority +// - resource - Resource create parameters. +// - options - PostRulesClientBeginCreateOrUpdateOptions contains the optional parameters for the PostRulesClient.BeginCreateOrUpdate +// method. +func (client *PostRulesClient) BeginCreateOrUpdate(ctx context.Context, globalRulestackName string, priority string, resource PostRulesResource, options *PostRulesClientBeginCreateOrUpdateOptions) (*runtime.Poller[PostRulesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, globalRulestackName, priority, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PostRulesClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[PostRulesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a PostRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *PostRulesClient) createOrUpdate(ctx context.Context, globalRulestackName string, priority string, resource PostRulesResource, options *PostRulesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, globalRulestackName, priority, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *PostRulesClient) createOrUpdateCreateRequest(ctx context.Context, globalRulestackName string, priority string, resource PostRulesResource, options *PostRulesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/postRules/{priority}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a PostRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Post Rule priority +// - options - PostRulesClientBeginDeleteOptions contains the optional parameters for the PostRulesClient.BeginDelete method. 
+func (client *PostRulesClient) BeginDelete(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientBeginDeleteOptions) (*runtime.Poller[PostRulesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, globalRulestackName, priority, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PostRulesClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[PostRulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a PostRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *PostRulesClient) deleteOperation(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *PostRulesClient) deleteCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/postRules/{priority}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a PostRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Post Rule priority +// - options - PostRulesClientGetOptions contains the optional parameters for the PostRulesClient.Get method. 
+func (client *PostRulesClient) Get(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientGetOptions) (PostRulesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return PostRulesClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PostRulesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PostRulesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *PostRulesClient) getCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientGetOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/postRules/{priority}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
+func (client *PostRulesClient) getHandleResponse(resp *http.Response) (PostRulesClientGetResponse, error) { + result := PostRulesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PostRulesResource); err != nil { + return PostRulesClientGetResponse{}, err + } + return result, nil +} + +// GetCounters - Get counters +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Post Rule priority +// - options - PostRulesClientGetCountersOptions contains the optional parameters for the PostRulesClient.GetCounters method. +func (client *PostRulesClient) GetCounters(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientGetCountersOptions) (PostRulesClientGetCountersResponse, error) { + req, err := client.getCountersCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return PostRulesClientGetCountersResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PostRulesClientGetCountersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PostRulesClientGetCountersResponse{}, runtime.NewResponseError(resp) + } + return client.getCountersHandleResponse(resp) +} + +// getCountersCreateRequest creates the GetCounters request. 
+func (client *PostRulesClient) getCountersCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientGetCountersOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/postRules/{priority}/getCounters" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.FirewallName != nil { + reqQP.Set("firewallName", *options.FirewallName) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getCountersHandleResponse handles the GetCounters response. +func (client *PostRulesClient) getCountersHandleResponse(resp *http.Response) (PostRulesClientGetCountersResponse, error) { + result := PostRulesClientGetCountersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RuleCounter); err != nil { + return PostRulesClientGetCountersResponse{}, err + } + return result, nil +} + +// NewListPager - List PostRulesResource resources by Tenant +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - PostRulesClientListOptions contains the optional parameters for the PostRulesClient.NewListPager method. 
+func (client *PostRulesClient) NewListPager(globalRulestackName string, options *PostRulesClientListOptions) *runtime.Pager[PostRulesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[PostRulesClientListResponse]{ + More: func(page PostRulesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *PostRulesClientListResponse) (PostRulesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, globalRulestackName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return PostRulesClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PostRulesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PostRulesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *PostRulesClient) listCreateRequest(ctx context.Context, globalRulestackName string, options *PostRulesClientListOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/postRules" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *PostRulesClient) listHandleResponse(resp *http.Response) (PostRulesClientListResponse, error) { + result := PostRulesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PostRulesResourceListResult); err != nil { + return PostRulesClientListResponse{}, err + } + return result, nil +} + +// RefreshCounters - Refresh counters +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Post Rule priority +// - options - PostRulesClientRefreshCountersOptions contains the optional parameters for the PostRulesClient.RefreshCounters +// method. +func (client *PostRulesClient) RefreshCounters(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientRefreshCountersOptions) (PostRulesClientRefreshCountersResponse, error) { + req, err := client.refreshCountersCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return PostRulesClientRefreshCountersResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PostRulesClientRefreshCountersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return PostRulesClientRefreshCountersResponse{}, runtime.NewResponseError(resp) + } + return PostRulesClientRefreshCountersResponse{}, nil +} + +// refreshCountersCreateRequest creates the RefreshCounters request. 
+func (client *PostRulesClient) refreshCountersCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientRefreshCountersOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/postRules/{priority}/refreshCounters" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.FirewallName != nil { + reqQP.Set("firewallName", *options.FirewallName) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// ResetCounters - Reset counters +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Post Rule priority +// - options - PostRulesClientResetCountersOptions contains the optional parameters for the PostRulesClient.ResetCounters method. 
+func (client *PostRulesClient) ResetCounters(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientResetCountersOptions) (PostRulesClientResetCountersResponse, error) { + req, err := client.resetCountersCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return PostRulesClientResetCountersResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PostRulesClientResetCountersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PostRulesClientResetCountersResponse{}, runtime.NewResponseError(resp) + } + return client.resetCountersHandleResponse(resp) +} + +// resetCountersCreateRequest creates the ResetCounters request. +func (client *PostRulesClient) resetCountersCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PostRulesClientResetCountersOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/postRules/{priority}/resetCounters" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.FirewallName != nil { + reqQP.Set("firewallName", *options.FirewallName) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// resetCountersHandleResponse handles the ResetCounters response. 
+func (client *PostRulesClient) resetCountersHandleResponse(resp *http.Response) (PostRulesClientResetCountersResponse, error) { + result := PostRulesClientResetCountersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RuleCounterReset); err != nil { + return PostRulesClientResetCountersResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/postrules_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/postrules_client_example_test.go new file mode 100644 index 000000000000..70d6be251a30 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/postrules_client_example_test.go @@ -0,0 +1,629 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_List_MaximumSet_Gen.json +func ExamplePostRulesClient_NewListPager_postRulesListMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewPostRulesClient().NewListPager("lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.PostRulesResourceListResult = armpanngfw.PostRulesResourceListResult{ + // Value: []*armpanngfw.PostRulesResource{ + // { + // Name: to.Ptr("aaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.RuleEntry{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + // Applications: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // AuditComment: to.Ptr("aaa"), + // Category: &armpanngfw.Category{ + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // URLCustom: []*string{ + // to.Ptr("aaaaa")}, + // }, + // DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + // Destination: &armpanngfw.DestinationAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaaaaaaaaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // FqdnLists: []*string{ + // to.Ptr("aaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // }, + // EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // InboundInspectionCertificate: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + // NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + // Priority: to.Ptr[int32](24), + // ProtocolPortList: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, 
+ // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + // Source: &armpanngfw.SourceAddr{ + // Cidrs: []*string{ + // to.Ptr("aaa")}, + // Countries: []*string{ + // to.Ptr("aaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaa")}, + // }, + // Tags: []*armpanngfw.TagInfo{ + // { + // Key: to.Ptr("keyName"), + // Value: to.Ptr("value"), + // }}, + // Protocol: to.Ptr("aaaa"), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_List_MinimumSet_Gen.json +func ExamplePostRulesClient_NewListPager_postRulesListMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewPostRulesClient().NewListPager("lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.PostRulesResourceListResult = armpanngfw.PostRulesResourceListResult{ + // Value: []*armpanngfw.PostRulesResource{ + // { + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/lrs1/postrules/1"), + // Properties: &armpanngfw.RuleEntry{ + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_Get_MaximumSet_Gen.json +func ExamplePostRulesClient_Get_postRulesGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPostRulesClient().Get(ctx, "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PostRulesResource = armpanngfw.PostRulesResource{ + // Name: to.Ptr("aaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.RuleEntry{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + // Applications: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // AuditComment: to.Ptr("aaa"), + // Category: &armpanngfw.Category{ + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // URLCustom: []*string{ + // to.Ptr("aaaaa")}, + // }, + // DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + // Destination: &armpanngfw.DestinationAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaaaaaaaaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // FqdnLists: []*string{ + // to.Ptr("aaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // }, + // EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // InboundInspectionCertificate: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + // NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + // Priority: to.Ptr[int32](24), + // ProtocolPortList: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + 
// RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + // Source: &armpanngfw.SourceAddr{ + // Cidrs: []*string{ + // to.Ptr("aaa")}, + // Countries: []*string{ + // to.Ptr("aaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaa")}, + // }, + // Tags: []*armpanngfw.TagInfo{ + // { + // Key: to.Ptr("keyName"), + // Value: to.Ptr("value"), + // }}, + // Protocol: to.Ptr("aaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_Get_MinimumSet_Gen.json +func ExamplePostRulesClient_Get_postRulesGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPostRulesClient().Get(ctx, "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PostRulesResource = armpanngfw.PostRulesResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/lrs1/postrules/1"), + // Properties: &armpanngfw.RuleEntry{ + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_CreateOrUpdate_MaximumSet_Gen.json +func ExamplePostRulesClient_BeginCreateOrUpdate_postRulesCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPostRulesClient().BeginCreateOrUpdate(ctx, "lrs1", "1", armpanngfw.PostRulesResource{ + Properties: &armpanngfw.RuleEntry{ + Description: to.Ptr("description of post rule"), + ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + Applications: []*string{ + to.Ptr("app1")}, + AuditComment: to.Ptr("example comment"), + Category: &armpanngfw.Category{ + Feeds: []*string{ + to.Ptr("feed")}, + URLCustom: []*string{ + to.Ptr("https://microsoft.com")}, + }, + DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + Destination: &armpanngfw.DestinationAddr{ + Cidrs: []*string{ + to.Ptr("1.0.0.1/10")}, + Countries: []*string{ + to.Ptr("India")}, + Feeds: []*string{ + to.Ptr("feed")}, + FqdnLists: []*string{ + to.Ptr("FQDN1")}, + PrefixLists: []*string{ + to.Ptr("PL1")}, + }, + EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + Etag: to.Ptr("c18e6eef-ba3e-49ee-8a85-2b36c863a9d0"), + InboundInspectionCertificate: to.Ptr("cert1"), + NegateDestination: 
to.Ptr(armpanngfw.BooleanEnumTRUE), + NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + ProtocolPortList: []*string{ + to.Ptr("80")}, + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + RuleName: to.Ptr("postRule1"), + RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + Source: &armpanngfw.SourceAddr{ + Cidrs: []*string{ + to.Ptr("1.0.0.1/10")}, + Countries: []*string{ + to.Ptr("India")}, + Feeds: []*string{ + to.Ptr("feed")}, + PrefixLists: []*string{ + to.Ptr("PL1")}, + }, + Tags: []*armpanngfw.TagInfo{ + { + Key: to.Ptr("keyName"), + Value: to.Ptr("value"), + }}, + Protocol: to.Ptr("HTTP"), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PostRulesResource = armpanngfw.PostRulesResource{ + // Name: to.Ptr("aaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.RuleEntry{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + // Applications: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // AuditComment: to.Ptr("aaa"), + // Category: &armpanngfw.Category{ + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // URLCustom: []*string{ + // to.Ptr("aaaaa")}, + // }, + // DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + // Destination: &armpanngfw.DestinationAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaaaaaaaaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // FqdnLists: []*string{ + // to.Ptr("aaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // }, + // EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // InboundInspectionCertificate: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + // NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + // Priority: to.Ptr[int32](24), + // ProtocolPortList: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + 
// RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + // Source: &armpanngfw.SourceAddr{ + // Cidrs: []*string{ + // to.Ptr("aaa")}, + // Countries: []*string{ + // to.Ptr("aaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaa")}, + // }, + // Tags: []*armpanngfw.TagInfo{ + // { + // Key: to.Ptr("keyName"), + // Value: to.Ptr("value"), + // }}, + // Protocol: to.Ptr("aaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_CreateOrUpdate_MinimumSet_Gen.json +func ExamplePostRulesClient_BeginCreateOrUpdate_postRulesCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPostRulesClient().BeginCreateOrUpdate(ctx, "lrs1", "1", armpanngfw.PostRulesResource{ + Properties: &armpanngfw.RuleEntry{ + RuleName: to.Ptr("postRule1"), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PostRulesResource = armpanngfw.PostRulesResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/lrs1/postrules/1"), + // Properties: &armpanngfw.RuleEntry{ + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_Delete_MaximumSet_Gen.json +func ExamplePostRulesClient_BeginDelete_postRulesDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPostRulesClient().BeginDelete(ctx, "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_Delete_MinimumSet_Gen.json +func ExamplePostRulesClient_BeginDelete_postRulesDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPostRulesClient().BeginDelete(ctx, "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed 
to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_getCounters_MaximumSet_Gen.json +func ExamplePostRulesClient_GetCounters_postRulesGetCountersMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPostRulesClient().GetCounters(ctx, "lrs1", "1", &armpanngfw.PostRulesClientGetCountersOptions{FirewallName: to.Ptr("firewall1")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.RuleCounter = armpanngfw.RuleCounter{ + // AppSeen: &armpanngfw.AppSeenData{ + // AppSeenList: []*armpanngfw.AppSeenInfo{ + // { + // Category: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // Risk: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // StandardPorts: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // SubCategory: to.Ptr("aaaaaaaaaaaaaaaaa"), + // Tag: to.Ptr("aaaaaaaaaa"), + // Technology: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // Title: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // }}, + // Count: to.Ptr[int32](13), + // }, + // FirewallName: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // HitCount: to.Ptr[int32](20), + // LastUpdatedTimestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // Priority: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // RequestTimestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // RuleListName: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // RuleName: to.Ptr("aaaa"), + // RuleStackName: to.Ptr("aaaaaaaaaaaaaaaaa"), + // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_getCounters_MinimumSet_Gen.json +func ExamplePostRulesClient_GetCounters_postRulesGetCountersMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPostRulesClient().GetCounters(ctx, "lrs1", "1", &armpanngfw.PostRulesClientGetCountersOptions{FirewallName: nil}) + if err != 
nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.RuleCounter = armpanngfw.RuleCounter{ + // Priority: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // RuleName: to.Ptr("aaaa"), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_refreshCounters_MaximumSet_Gen.json +func ExamplePostRulesClient_RefreshCounters_postRulesRefreshCountersMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewPostRulesClient().RefreshCounters(ctx, "lrs1", "1", &armpanngfw.PostRulesClientRefreshCountersOptions{FirewallName: to.Ptr("firewall1")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_refreshCounters_MinimumSet_Gen.json +func ExamplePostRulesClient_RefreshCounters_postRulesRefreshCountersMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + 
clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewPostRulesClient().RefreshCounters(ctx, "lrs1", "1", &armpanngfw.PostRulesClientRefreshCountersOptions{FirewallName: nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_resetCounters_MaximumSet_Gen.json +func ExamplePostRulesClient_ResetCounters_postRulesResetCountersMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPostRulesClient().ResetCounters(ctx, "lrs1", "1", &armpanngfw.PostRulesClientResetCountersOptions{FirewallName: to.Ptr("firewall1")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.RuleCounterReset = armpanngfw.RuleCounterReset{ + // FirewallName: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // Priority: to.Ptr("aaaaaaa"), + // RuleListName: to.Ptr("aaaaa"), + // RuleName: to.Ptr("aaaaa"), + // RuleStackName: to.Ptr("aa"), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PostRules_resetCounters_MinimumSet_Gen.json +func ExamplePostRulesClient_ResetCounters_postRulesResetCountersMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPostRulesClient().ResetCounters(ctx, "lrs1", "1", &armpanngfw.PostRulesClientResetCountersOptions{FirewallName: nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.RuleCounterReset = armpanngfw.RuleCounterReset{ + // } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistglobalrulestack_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistglobalrulestack_client.go new file mode 100644 index 000000000000..12dc57df8b7e --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistglobalrulestack_client.go @@ -0,0 +1,284 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// PrefixListGlobalRulestackClient contains the methods for the PrefixListGlobalRulestack group. +// Don't use this type directly, use NewPrefixListGlobalRulestackClient() instead. +type PrefixListGlobalRulestackClient struct { + internal *arm.Client +} + +// NewPrefixListGlobalRulestackClient creates a new instance of PrefixListGlobalRulestackClient with the specified values. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewPrefixListGlobalRulestackClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*PrefixListGlobalRulestackClient, error) { + cl, err := arm.NewClient(moduleName+".PrefixListGlobalRulestackClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &PrefixListGlobalRulestackClient{ + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a PrefixListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - name - Local Rule priority +// - resource - Resource create parameters. 
+// - options - PrefixListGlobalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the PrefixListGlobalRulestackClient.BeginCreateOrUpdate +// method. +func (client *PrefixListGlobalRulestackClient) BeginCreateOrUpdate(ctx context.Context, globalRulestackName string, name string, resource PrefixListGlobalRulestackResource, options *PrefixListGlobalRulestackClientBeginCreateOrUpdateOptions) (*runtime.Poller[PrefixListGlobalRulestackClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, globalRulestackName, name, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrefixListGlobalRulestackClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[PrefixListGlobalRulestackClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a PrefixListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *PrefixListGlobalRulestackClient) createOrUpdate(ctx context.Context, globalRulestackName string, name string, resource PrefixListGlobalRulestackResource, options *PrefixListGlobalRulestackClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, globalRulestackName, name, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *PrefixListGlobalRulestackClient) createOrUpdateCreateRequest(ctx context.Context, globalRulestackName string, name string, resource PrefixListGlobalRulestackResource, options *PrefixListGlobalRulestackClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/prefixlists/{name}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a PrefixListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - name - Local Rule priority +// - options - PrefixListGlobalRulestackClientBeginDeleteOptions contains the optional parameters for the PrefixListGlobalRulestackClient.BeginDelete +// method. 
+func (client *PrefixListGlobalRulestackClient) BeginDelete(ctx context.Context, globalRulestackName string, name string, options *PrefixListGlobalRulestackClientBeginDeleteOptions) (*runtime.Poller[PrefixListGlobalRulestackClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, globalRulestackName, name, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrefixListGlobalRulestackClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[PrefixListGlobalRulestackClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a PrefixListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *PrefixListGlobalRulestackClient) deleteOperation(ctx context.Context, globalRulestackName string, name string, options *PrefixListGlobalRulestackClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, globalRulestackName, name, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *PrefixListGlobalRulestackClient) deleteCreateRequest(ctx context.Context, globalRulestackName string, name string, options *PrefixListGlobalRulestackClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/prefixlists/{name}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a PrefixListGlobalRulestackResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - name - Local Rule priority +// - options - PrefixListGlobalRulestackClientGetOptions contains the optional parameters for the PrefixListGlobalRulestackClient.Get +// method. 
+func (client *PrefixListGlobalRulestackClient) Get(ctx context.Context, globalRulestackName string, name string, options *PrefixListGlobalRulestackClientGetOptions) (PrefixListGlobalRulestackClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, globalRulestackName, name, options) + if err != nil { + return PrefixListGlobalRulestackClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PrefixListGlobalRulestackClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PrefixListGlobalRulestackClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *PrefixListGlobalRulestackClient) getCreateRequest(ctx context.Context, globalRulestackName string, name string, options *PrefixListGlobalRulestackClientGetOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/prefixlists/{name}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
+func (client *PrefixListGlobalRulestackClient) getHandleResponse(resp *http.Response) (PrefixListGlobalRulestackClientGetResponse, error) { + result := PrefixListGlobalRulestackClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PrefixListGlobalRulestackResource); err != nil { + return PrefixListGlobalRulestackClientGetResponse{}, err + } + return result, nil +} + +// NewListPager - List PrefixListGlobalRulestackResource resources by Tenant +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - PrefixListGlobalRulestackClientListOptions contains the optional parameters for the PrefixListGlobalRulestackClient.NewListPager +// method. +func (client *PrefixListGlobalRulestackClient) NewListPager(globalRulestackName string, options *PrefixListGlobalRulestackClientListOptions) *runtime.Pager[PrefixListGlobalRulestackClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[PrefixListGlobalRulestackClientListResponse]{ + More: func(page PrefixListGlobalRulestackClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *PrefixListGlobalRulestackClientListResponse) (PrefixListGlobalRulestackClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, globalRulestackName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return PrefixListGlobalRulestackClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PrefixListGlobalRulestackClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PrefixListGlobalRulestackClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. 
+func (client *PrefixListGlobalRulestackClient) listCreateRequest(ctx context.Context, globalRulestackName string, options *PrefixListGlobalRulestackClientListOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/prefixlists" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. +func (client *PrefixListGlobalRulestackClient) listHandleResponse(resp *http.Response) (PrefixListGlobalRulestackClientListResponse, error) { + result := PrefixListGlobalRulestackClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PrefixListGlobalRulestackResourceListResult); err != nil { + return PrefixListGlobalRulestackClientListResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistglobalrulestack_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistglobalrulestack_client_example_test.go new file mode 100644 index 000000000000..e5b66dd44d85 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistglobalrulestack_client_example_test.go @@ -0,0 +1,303 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_List_MaximumSet_Gen.json +func ExamplePrefixListGlobalRulestackClient_NewListPager_prefixListGlobalRulestackListMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewPrefixListGlobalRulestackClient().NewListPager("praval", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.PrefixListGlobalRulestackResourceListResult = armpanngfw.PrefixListGlobalRulestackResourceListResult{ + // Value: []*armpanngfw.PrefixListGlobalRulestackResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.PrefixObject{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // AuditComment: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // PrefixList: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_List_MinimumSet_Gen.json +func ExamplePrefixListGlobalRulestackClient_NewListPager_prefixListGlobalRulestackListMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewPrefixListGlobalRulestackClient().NewListPager("praval", nil) + for 
pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // page.PrefixListGlobalRulestackResourceListResult = armpanngfw.PrefixListGlobalRulestackResourceListResult{ + // Value: []*armpanngfw.PrefixListGlobalRulestackResource{ + // { + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval/prefixlists/prefixlists1"), + // Properties: &armpanngfw.PrefixObject{ + // PrefixList: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_Get_MaximumSet_Gen.json +func ExamplePrefixListGlobalRulestackClient_Get_prefixListGlobalRulestackGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPrefixListGlobalRulestackClient().Get(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes. + // res.PrefixListGlobalRulestackResource = armpanngfw.PrefixListGlobalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.PrefixObject{ + // Description: to.Ptr("string"), + // AuditComment: to.Ptr("comment"), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // PrefixList: []*string{ + // to.Ptr("1.0.0.0/24")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_Get_MinimumSet_Gen.json +func ExamplePrefixListGlobalRulestackClient_Get_prefixListGlobalRulestackGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPrefixListGlobalRulestackClient().Get(ctx, "praval", "armid1", nil) + if err != nil { + 
log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.PrefixListGlobalRulestackResource = armpanngfw.PrefixListGlobalRulestackResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval/prefixlists/armid1"), + // Properties: &armpanngfw.PrefixObject{ + // PrefixList: []*string{ + // to.Ptr("1.0.0.0/24")}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_CreateOrUpdate_MaximumSet_Gen.json +func ExamplePrefixListGlobalRulestackClient_BeginCreateOrUpdate_prefixListGlobalRulestackCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPrefixListGlobalRulestackClient().BeginCreateOrUpdate(ctx, "praval", "armid1", armpanngfw.PrefixListGlobalRulestackResource{ + Properties: &armpanngfw.PrefixObject{ + Description: to.Ptr("string"), + AuditComment: to.Ptr("comment"), + Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + PrefixList: []*string{ + to.Ptr("1.0.0.0/24")}, + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil 
{ + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.PrefixListGlobalRulestackResource = armpanngfw.PrefixListGlobalRulestackResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.PrefixObject{ + // Description: to.Ptr("string"), + // AuditComment: to.Ptr("comment"), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // PrefixList: []*string{ + // to.Ptr("1.0.0.0/24")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_CreateOrUpdate_MinimumSet_Gen.json +func ExamplePrefixListGlobalRulestackClient_BeginCreateOrUpdate_prefixListGlobalRulestackCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + 
} + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPrefixListGlobalRulestackClient().BeginCreateOrUpdate(ctx, "praval", "armid1", armpanngfw.PrefixListGlobalRulestackResource{ + Properties: &armpanngfw.PrefixObject{ + PrefixList: []*string{ + to.Ptr("1.0.0.0/24")}, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.PrefixListGlobalRulestackResource = armpanngfw.PrefixListGlobalRulestackResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/praval/prefixlists/armid1"), + // Properties: &armpanngfw.PrefixObject{ + // PrefixList: []*string{ + // to.Ptr("1.0.0.0/24")}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_Delete_MaximumSet_Gen.json +func ExamplePrefixListGlobalRulestackClient_BeginDelete_prefixListGlobalRulestackDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := 
clientFactory.NewPrefixListGlobalRulestackClient().BeginDelete(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListGlobalRulestack_Delete_MinimumSet_Gen.json +func ExamplePrefixListGlobalRulestackClient_BeginDelete_prefixListGlobalRulestackDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPrefixListGlobalRulestackClient().BeginDelete(ctx, "praval", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistlocalrulestack_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistlocalrulestack_client.go new file mode 100644 index 000000000000..da9bd36daec7 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistlocalrulestack_client.go @@ -0,0 +1,323 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// PrefixListLocalRulestackClient contains the methods for the PrefixListLocalRulestack group. +// Don't use this type directly, use NewPrefixListLocalRulestackClient() instead. +type PrefixListLocalRulestackClient struct { + internal *arm.Client + subscriptionID string +} + +// NewPrefixListLocalRulestackClient creates a new instance of PrefixListLocalRulestackClient with the specified values. +// - subscriptionID - The ID of the target subscription. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewPrefixListLocalRulestackClient(subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) (*PrefixListLocalRulestackClient, error) { + cl, err := arm.NewClient(moduleName+".PrefixListLocalRulestackClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &PrefixListLocalRulestackClient{ + subscriptionID: subscriptionID, + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a PrefixListResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - name - Local Rule priority +// - resource - Resource create parameters. 
+// - options - PrefixListLocalRulestackClientBeginCreateOrUpdateOptions contains the optional parameters for the PrefixListLocalRulestackClient.BeginCreateOrUpdate +// method. +func (client *PrefixListLocalRulestackClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, name string, resource PrefixListResource, options *PrefixListLocalRulestackClientBeginCreateOrUpdateOptions) (*runtime.Poller[PrefixListLocalRulestackClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, resourceGroupName, localRulestackName, name, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrefixListLocalRulestackClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[PrefixListLocalRulestackClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a PrefixListResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *PrefixListLocalRulestackClient) createOrUpdate(ctx context.Context, resourceGroupName string, localRulestackName string, name string, resource PrefixListResource, options *PrefixListLocalRulestackClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, resourceGroupName, localRulestackName, name, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *PrefixListLocalRulestackClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, name string, resource PrefixListResource, options *PrefixListLocalRulestackClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/prefixlists/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a PrefixListResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - localRulestackName - LocalRulestack resource name +// - name - Local Rule priority +// - options - PrefixListLocalRulestackClientBeginDeleteOptions contains the optional parameters for the PrefixListLocalRulestackClient.BeginDelete +// method. +func (client *PrefixListLocalRulestackClient) BeginDelete(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *PrefixListLocalRulestackClientBeginDeleteOptions) (*runtime.Poller[PrefixListLocalRulestackClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, resourceGroupName, localRulestackName, name, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PrefixListLocalRulestackClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[PrefixListLocalRulestackClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a PrefixListResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *PrefixListLocalRulestackClient) deleteOperation(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *PrefixListLocalRulestackClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, resourceGroupName, localRulestackName, name, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *PrefixListLocalRulestackClient) deleteCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *PrefixListLocalRulestackClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/prefixlists/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a PrefixListResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. 
+// - localRulestackName - LocalRulestack resource name +// - name - Local Rule priority +// - options - PrefixListLocalRulestackClientGetOptions contains the optional parameters for the PrefixListLocalRulestackClient.Get +// method. +func (client *PrefixListLocalRulestackClient) Get(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *PrefixListLocalRulestackClientGetOptions) (PrefixListLocalRulestackClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, resourceGroupName, localRulestackName, name, options) + if err != nil { + return PrefixListLocalRulestackClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PrefixListLocalRulestackClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PrefixListLocalRulestackClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. 
+func (client *PrefixListLocalRulestackClient) getCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, name string, options *PrefixListLocalRulestackClientGetOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/prefixlists/{name}" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + if name == "" { + return nil, errors.New("parameter name cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{name}", url.PathEscape(name)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
+func (client *PrefixListLocalRulestackClient) getHandleResponse(resp *http.Response) (PrefixListLocalRulestackClientGetResponse, error) { + result := PrefixListLocalRulestackClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PrefixListResource); err != nil { + return PrefixListLocalRulestackClientGetResponse{}, err + } + return result, nil +} + +// NewListByLocalRulestacksPager - List PrefixListResource resources by LocalRulestacks +// +// Generated from API version 2022-08-29-preview +// - resourceGroupName - The name of the resource group. The name is case insensitive. +// - localRulestackName - LocalRulestack resource name +// - options - PrefixListLocalRulestackClientListByLocalRulestacksOptions contains the optional parameters for the PrefixListLocalRulestackClient.NewListByLocalRulestacksPager +// method. +func (client *PrefixListLocalRulestackClient) NewListByLocalRulestacksPager(resourceGroupName string, localRulestackName string, options *PrefixListLocalRulestackClientListByLocalRulestacksOptions) *runtime.Pager[PrefixListLocalRulestackClientListByLocalRulestacksResponse] { + return runtime.NewPager(runtime.PagingHandler[PrefixListLocalRulestackClientListByLocalRulestacksResponse]{ + More: func(page PrefixListLocalRulestackClientListByLocalRulestacksResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *PrefixListLocalRulestackClientListByLocalRulestacksResponse) (PrefixListLocalRulestackClientListByLocalRulestacksResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listByLocalRulestacksCreateRequest(ctx, resourceGroupName, localRulestackName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return PrefixListLocalRulestackClientListByLocalRulestacksResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return 
PrefixListLocalRulestackClientListByLocalRulestacksResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PrefixListLocalRulestackClientListByLocalRulestacksResponse{}, runtime.NewResponseError(resp) + } + return client.listByLocalRulestacksHandleResponse(resp) + }, + }) +} + +// listByLocalRulestacksCreateRequest creates the ListByLocalRulestacks request. +func (client *PrefixListLocalRulestackClient) listByLocalRulestacksCreateRequest(ctx context.Context, resourceGroupName string, localRulestackName string, options *PrefixListLocalRulestackClientListByLocalRulestacksOptions) (*policy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/{localRulestackName}/prefixlists" + if client.subscriptionID == "" { + return nil, errors.New("parameter client.subscriptionID cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID)) + if resourceGroupName == "" { + return nil, errors.New("parameter resourceGroupName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName)) + if localRulestackName == "" { + return nil, errors.New("parameter localRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{localRulestackName}", url.PathEscape(localRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listByLocalRulestacksHandleResponse handles the ListByLocalRulestacks response. 
+func (client *PrefixListLocalRulestackClient) listByLocalRulestacksHandleResponse(resp *http.Response) (PrefixListLocalRulestackClientListByLocalRulestacksResponse, error) { + result := PrefixListLocalRulestackClientListByLocalRulestacksResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PrefixListResourceListResult); err != nil { + return PrefixListLocalRulestackClientListByLocalRulestacksResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistlocalrulestack_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistlocalrulestack_client_example_test.go new file mode 100644 index 000000000000..71dc87c5e2c5 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prefixlistlocalrulestack_client_example_test.go @@ -0,0 +1,303 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListLocalRulestack_ListByLocalRulestacks_MaximumSet_Gen.json +func ExamplePrefixListLocalRulestackClient_NewListByLocalRulestacksPager_prefixListLocalRulestackListByLocalRulestacksMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewPrefixListLocalRulestackClient().NewListByLocalRulestacksPager("rgopenapi", "lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.PrefixListResourceListResult = armpanngfw.PrefixListResourceListResult{ + // Value: []*armpanngfw.PrefixListResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("aa"), + // ID: to.Ptr("aaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.PrefixObject{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // AuditComment: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaa"), + // PrefixList: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListLocalRulestack_ListByLocalRulestacks_MinimumSet_Gen.json +func ExamplePrefixListLocalRulestackClient_NewListByLocalRulestacksPager_prefixListLocalRulestackListByLocalRulestacksMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewPrefixListLocalRulestackClient().NewListByLocalRulestacksPager("rgopenapi", "lrs1", nil) + for pager.More() { + 
page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // page.PrefixListResourceListResult = armpanngfw.PrefixListResourceListResult{ + // Value: []*armpanngfw.PrefixListResource{ + // { + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval/prefixlists/prefixlists1"), + // Properties: &armpanngfw.PrefixObject{ + // PrefixList: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListLocalRulestack_Get_MaximumSet_Gen.json +func ExamplePrefixListLocalRulestackClient_Get_prefixListLocalRulestackGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPrefixListLocalRulestackClient().Get(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. 
Please pay attention that all the values in the output are fake values for just demo purposes. + // res.PrefixListResource = armpanngfw.PrefixListResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.PrefixObject{ + // Description: to.Ptr("string"), + // AuditComment: to.Ptr("comment"), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // PrefixList: []*string{ + // to.Ptr("1.0.0.0/24")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListLocalRulestack_Get_MinimumSet_Gen.json +func ExamplePrefixListLocalRulestackClient_Get_prefixListLocalRulestackGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPrefixListLocalRulestackClient().Get(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the 
request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.PrefixListResource = armpanngfw.PrefixListResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval/prefixlists/armid1"), + // Properties: &armpanngfw.PrefixObject{ + // PrefixList: []*string{ + // to.Ptr("1.0.0.0/24")}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListLocalRulestack_CreateOrUpdate_MaximumSet_Gen.json +func ExamplePrefixListLocalRulestackClient_BeginCreateOrUpdate_prefixListLocalRulestackCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPrefixListLocalRulestackClient().BeginCreateOrUpdate(ctx, "rgopenapi", "lrs1", "armid1", armpanngfw.PrefixListResource{ + Properties: &armpanngfw.PrefixObject{ + Description: to.Ptr("string"), + AuditComment: to.Ptr("comment"), + Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + PrefixList: []*string{ + to.Ptr("1.0.0.0/24")}, + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != 
nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.PrefixListResource = armpanngfw.PrefixListResource{ + // Name: to.Ptr("armid1"), + // Type: to.Ptr("certificates"), + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalrulestacks/armid1/certificates/armid1"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.PrefixObject{ + // Description: to.Ptr("string"), + // AuditComment: to.Ptr("comment"), + // Etag: to.Ptr("2bf4a339-294d-4c25-b0b2-ef649e9f5c27"), + // PrefixList: []*string{ + // to.Ptr("1.0.0.0/24")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListLocalRulestack_CreateOrUpdate_MinimumSet_Gen.json +func ExamplePrefixListLocalRulestackClient_BeginCreateOrUpdate_prefixListLocalRulestackCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := 
context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPrefixListLocalRulestackClient().BeginCreateOrUpdate(ctx, "rgopenapi", "lrs1", "armid1", armpanngfw.PrefixListResource{ + Properties: &armpanngfw.PrefixObject{ + PrefixList: []*string{ + to.Ptr("1.0.0.0/24")}, + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.PrefixListResource = armpanngfw.PrefixListResource{ + // ID: to.Ptr("/subscriptions/2bf4a339-294d-4c25-b0b2-ef649e9f5c27/resourcegroups/rgopenapi/providers/PaloAltoNetworks.Cloudngfw/localRulestacks/praval/prefixlists/armid1"), + // Properties: &armpanngfw.PrefixObject{ + // PrefixList: []*string{ + // to.Ptr("1.0.0.0/24")}, + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListLocalRulestack_Delete_MaximumSet_Gen.json +func ExamplePrefixListLocalRulestackClient_BeginDelete_prefixListLocalRulestackDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := 
clientFactory.NewPrefixListLocalRulestackClient().BeginDelete(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PrefixListLocalRulestack_Delete_MinimumSet_Gen.json +func ExamplePrefixListLocalRulestackClient_BeginDelete_prefixListLocalRulestackDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPrefixListLocalRulestackClient().BeginDelete(ctx, "rgopenapi", "lrs1", "armid1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prerules_client.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prerules_client.go new file mode 100644 index 000000000000..4fbd9b8ed729 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prerules_client.go @@ -0,0 +1,441 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+// DO NOT EDIT. + +package armpanngfw + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "net/url" + "strings" +) + +// PreRulesClient contains the methods for the PreRules group. +// Don't use this type directly, use NewPreRulesClient() instead. +type PreRulesClient struct { + internal *arm.Client +} + +// NewPreRulesClient creates a new instance of PreRulesClient with the specified values. +// - credential - used to authorize requests. Usually a credential from azidentity. +// - options - pass nil to accept the default values. +func NewPreRulesClient(credential azcore.TokenCredential, options *arm.ClientOptions) (*PreRulesClient, error) { + cl, err := arm.NewClient(moduleName+".PreRulesClient", moduleVersion, credential, options) + if err != nil { + return nil, err + } + client := &PreRulesClient{ + internal: cl, + } + return client, nil +} + +// BeginCreateOrUpdate - Create a PreRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Pre Rule priority +// - resource - Resource create parameters. +// - options - PreRulesClientBeginCreateOrUpdateOptions contains the optional parameters for the PreRulesClient.BeginCreateOrUpdate +// method. 
+func (client *PreRulesClient) BeginCreateOrUpdate(ctx context.Context, globalRulestackName string, priority string, resource PreRulesResource, options *PreRulesClientBeginCreateOrUpdateOptions) (*runtime.Poller[PreRulesClientCreateOrUpdateResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.createOrUpdate(ctx, globalRulestackName, priority, resource, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PreRulesClientCreateOrUpdateResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[PreRulesClientCreateOrUpdateResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// CreateOrUpdate - Create a PreRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *PreRulesClient) createOrUpdate(ctx context.Context, globalRulestackName string, priority string, resource PreRulesResource, options *PreRulesClientBeginCreateOrUpdateOptions) (*http.Response, error) { + req, err := client.createOrUpdateCreateRequest(ctx, globalRulestackName, priority, resource, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// createOrUpdateCreateRequest creates the CreateOrUpdate request. 
+func (client *PreRulesClient) createOrUpdateCreateRequest(ctx context.Context, globalRulestackName string, priority string, resource PreRulesResource, options *PreRulesClientBeginCreateOrUpdateOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/preRules/{priority}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, runtime.MarshalAsJSON(req, resource) +} + +// BeginDelete - Delete a PreRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Pre Rule priority +// - options - PreRulesClientBeginDeleteOptions contains the optional parameters for the PreRulesClient.BeginDelete method. 
+func (client *PreRulesClient) BeginDelete(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientBeginDeleteOptions) (*runtime.Poller[PreRulesClientDeleteResponse], error) { + if options == nil || options.ResumeToken == "" { + resp, err := client.deleteOperation(ctx, globalRulestackName, priority, options) + if err != nil { + return nil, err + } + return runtime.NewPoller(resp, client.internal.Pipeline(), &runtime.NewPollerOptions[PreRulesClientDeleteResponse]{ + FinalStateVia: runtime.FinalStateViaAzureAsyncOp, + }) + } else { + return runtime.NewPollerFromResumeToken[PreRulesClientDeleteResponse](options.ResumeToken, client.internal.Pipeline(), nil) + } +} + +// Delete - Delete a PreRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +func (client *PreRulesClient) deleteOperation(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientBeginDeleteOptions) (*http.Response, error) { + req, err := client.deleteCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return nil, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return nil, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusNoContent) { + return nil, runtime.NewResponseError(resp) + } + return resp, nil +} + +// deleteCreateRequest creates the Delete request. 
+func (client *PreRulesClient) deleteCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientBeginDeleteOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/preRules/{priority}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodDelete, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// Get - Get a PreRulesResource +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Pre Rule priority +// - options - PreRulesClientGetOptions contains the optional parameters for the PreRulesClient.Get method. 
+func (client *PreRulesClient) Get(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientGetOptions) (PreRulesClientGetResponse, error) { + req, err := client.getCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return PreRulesClientGetResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PreRulesClientGetResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PreRulesClientGetResponse{}, runtime.NewResponseError(resp) + } + return client.getHandleResponse(resp) +} + +// getCreateRequest creates the Get request. +func (client *PreRulesClient) getCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientGetOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/preRules/{priority}" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getHandleResponse handles the Get response. 
+func (client *PreRulesClient) getHandleResponse(resp *http.Response) (PreRulesClientGetResponse, error) { + result := PreRulesClientGetResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PreRulesResource); err != nil { + return PreRulesClientGetResponse{}, err + } + return result, nil +} + +// GetCounters - Get counters +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Pre Rule priority +// - options - PreRulesClientGetCountersOptions contains the optional parameters for the PreRulesClient.GetCounters method. +func (client *PreRulesClient) GetCounters(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientGetCountersOptions) (PreRulesClientGetCountersResponse, error) { + req, err := client.getCountersCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return PreRulesClientGetCountersResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PreRulesClientGetCountersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PreRulesClientGetCountersResponse{}, runtime.NewResponseError(resp) + } + return client.getCountersHandleResponse(resp) +} + +// getCountersCreateRequest creates the GetCounters request. 
+func (client *PreRulesClient) getCountersCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientGetCountersOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/preRules/{priority}/getCounters" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.FirewallName != nil { + reqQP.Set("firewallName", *options.FirewallName) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// getCountersHandleResponse handles the GetCounters response. +func (client *PreRulesClient) getCountersHandleResponse(resp *http.Response) (PreRulesClientGetCountersResponse, error) { + result := PreRulesClientGetCountersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RuleCounter); err != nil { + return PreRulesClientGetCountersResponse{}, err + } + return result, nil +} + +// NewListPager - List PreRulesResource resources by Tenant +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - options - PreRulesClientListOptions contains the optional parameters for the PreRulesClient.NewListPager method. 
+func (client *PreRulesClient) NewListPager(globalRulestackName string, options *PreRulesClientListOptions) *runtime.Pager[PreRulesClientListResponse] { + return runtime.NewPager(runtime.PagingHandler[PreRulesClientListResponse]{ + More: func(page PreRulesClientListResponse) bool { + return page.NextLink != nil && len(*page.NextLink) > 0 + }, + Fetcher: func(ctx context.Context, page *PreRulesClientListResponse) (PreRulesClientListResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = client.listCreateRequest(ctx, globalRulestackName, options) + } else { + req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink) + } + if err != nil { + return PreRulesClientListResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PreRulesClientListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PreRulesClientListResponse{}, runtime.NewResponseError(resp) + } + return client.listHandleResponse(resp) + }, + }) +} + +// listCreateRequest creates the List request. +func (client *PreRulesClient) listCreateRequest(ctx context.Context, globalRulestackName string, options *PreRulesClientListOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/preRules" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// listHandleResponse handles the List response. 
+func (client *PreRulesClient) listHandleResponse(resp *http.Response) (PreRulesClientListResponse, error) { + result := PreRulesClientListResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.PreRulesResourceListResult); err != nil { + return PreRulesClientListResponse{}, err + } + return result, nil +} + +// RefreshCounters - Refresh counters +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Pre Rule priority +// - options - PreRulesClientRefreshCountersOptions contains the optional parameters for the PreRulesClient.RefreshCounters +// method. +func (client *PreRulesClient) RefreshCounters(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientRefreshCountersOptions) (PreRulesClientRefreshCountersResponse, error) { + req, err := client.refreshCountersCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return PreRulesClientRefreshCountersResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PreRulesClientRefreshCountersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return PreRulesClientRefreshCountersResponse{}, runtime.NewResponseError(resp) + } + return PreRulesClientRefreshCountersResponse{}, nil +} + +// refreshCountersCreateRequest creates the RefreshCounters request. 
+func (client *PreRulesClient) refreshCountersCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientRefreshCountersOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/preRules/{priority}/refreshCounters" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.FirewallName != nil { + reqQP.Set("firewallName", *options.FirewallName) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// ResetCounters - Reset counters +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2022-08-29-preview +// - globalRulestackName - GlobalRulestack resource name +// - priority - Pre Rule priority +// - options - PreRulesClientResetCountersOptions contains the optional parameters for the PreRulesClient.ResetCounters method. 
+func (client *PreRulesClient) ResetCounters(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientResetCountersOptions) (PreRulesClientResetCountersResponse, error) { + req, err := client.resetCountersCreateRequest(ctx, globalRulestackName, priority, options) + if err != nil { + return PreRulesClientResetCountersResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return PreRulesClientResetCountersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return PreRulesClientResetCountersResponse{}, runtime.NewResponseError(resp) + } + return client.resetCountersHandleResponse(resp) +} + +// resetCountersCreateRequest creates the ResetCounters request. +func (client *PreRulesClient) resetCountersCreateRequest(ctx context.Context, globalRulestackName string, priority string, options *PreRulesClientResetCountersOptions) (*policy.Request, error) { + urlPath := "/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/{globalRulestackName}/preRules/{priority}/resetCounters" + if globalRulestackName == "" { + return nil, errors.New("parameter globalRulestackName cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{globalRulestackName}", url.PathEscape(globalRulestackName)) + if priority == "" { + return nil, errors.New("parameter priority cannot be empty") + } + urlPath = strings.ReplaceAll(urlPath, "{priority}", url.PathEscape(priority)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.internal.Endpoint(), urlPath)) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("api-version", "2022-08-29-preview") + if options != nil && options.FirewallName != nil { + reqQP.Set("firewallName", *options.FirewallName) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["Accept"] = []string{"application/json"} + return req, nil +} + +// resetCountersHandleResponse handles the ResetCounters response. 
+func (client *PreRulesClient) resetCountersHandleResponse(resp *http.Response) (PreRulesClientResetCountersResponse, error) { + result := PreRulesClientResetCountersResponse{} + if err := runtime.UnmarshalAsJSON(resp, &result.RuleCounterReset); err != nil { + return PreRulesClientResetCountersResponse{}, err + } + return result, nil +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prerules_client_example_test.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prerules_client_example_test.go new file mode 100644 index 000000000000..118cba83affd --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/prerules_client_example_test.go @@ -0,0 +1,629 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package armpanngfw_test + +import ( + "context" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw" +) + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_List_MaximumSet_Gen.json +func ExamplePreRulesClient_NewListPager_preRulesListMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewPreRulesClient().NewListPager("lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.PreRulesResourceListResult = armpanngfw.PreRulesResourceListResult{ + // Value: []*armpanngfw.PreRulesResource{ + // { + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.RuleEntry{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + // Applications: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // AuditComment: to.Ptr("aaa"), + // Category: &armpanngfw.Category{ + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // URLCustom: []*string{ + // to.Ptr("aaaaa")}, + // }, + // DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + // Destination: &armpanngfw.DestinationAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaaaaaaaaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // FqdnLists: []*string{ + // to.Ptr("aaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // }, + // EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // InboundInspectionCertificate: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + // NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + // Priority: to.Ptr[int32](24), + // ProtocolPortList: []*string{ + // 
to.Ptr("aaaaaaaaaaaa")}, + // ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + // Source: &armpanngfw.SourceAddr{ + // Cidrs: []*string{ + // to.Ptr("aaa")}, + // Countries: []*string{ + // to.Ptr("aaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaa")}, + // }, + // Tags: []*armpanngfw.TagInfo{ + // { + // Key: to.Ptr("keyName"), + // Value: to.Ptr("value"), + // }}, + // Protocol: to.Ptr("aaaa"), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_List_MinimumSet_Gen.json +func ExamplePreRulesClient_NewListPager_preRulesListMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + pager := clientFactory.NewPreRulesClient().NewListPager("lrs1", nil) + for pager.More() { + page, err := pager.NextPage(ctx) + if err != nil { + log.Fatalf("failed to advance page: %v", err) + } + for _, v := range page.Value { + // You could use page here. We use blank identifier for just demo purposes. + _ = v + } + // If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // page.PreRulesResourceListResult = armpanngfw.PreRulesResourceListResult{ + // Value: []*armpanngfw.PreRulesResource{ + // { + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/lrs1/prerules/1"), + // Properties: &armpanngfw.RuleEntry{ + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // }}, + // } + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_Get_MaximumSet_Gen.json +func ExamplePreRulesClient_Get_preRulesGetMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPreRulesClient().Get(ctx, "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PreRulesResource = armpanngfw.PreRulesResource{ + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.RuleEntry{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + // Applications: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // AuditComment: to.Ptr("aaa"), + // Category: &armpanngfw.Category{ + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // URLCustom: []*string{ + // to.Ptr("aaaaa")}, + // }, + // DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + // Destination: &armpanngfw.DestinationAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaaaaaaaaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // FqdnLists: []*string{ + // to.Ptr("aaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // }, + // EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // InboundInspectionCertificate: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + // NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + // Priority: to.Ptr[int32](24), + // ProtocolPortList: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // ProvisioningState: 
to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + // Source: &armpanngfw.SourceAddr{ + // Cidrs: []*string{ + // to.Ptr("aaa")}, + // Countries: []*string{ + // to.Ptr("aaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaa")}, + // }, + // Tags: []*armpanngfw.TagInfo{ + // { + // Key: to.Ptr("keyName"), + // Value: to.Ptr("value"), + // }}, + // Protocol: to.Ptr("aaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_Get_MinimumSet_Gen.json +func ExamplePreRulesClient_Get_preRulesGetMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPreRulesClient().Get(ctx, "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PreRulesResource = armpanngfw.PreRulesResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/lrs1/prerules/1"), + // Properties: &armpanngfw.RuleEntry{ + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_CreateOrUpdate_MaximumSet_Gen.json +func ExamplePreRulesClient_BeginCreateOrUpdate_preRulesCreateOrUpdateMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPreRulesClient().BeginCreateOrUpdate(ctx, "lrs1", "1", armpanngfw.PreRulesResource{ + Properties: &armpanngfw.RuleEntry{ + Description: to.Ptr("description of pre rule"), + ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + Applications: []*string{ + to.Ptr("app1")}, + AuditComment: to.Ptr("example comment"), + Category: &armpanngfw.Category{ + Feeds: []*string{ + to.Ptr("feed")}, + URLCustom: []*string{ + to.Ptr("https://microsoft.com")}, + }, + DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + Destination: &armpanngfw.DestinationAddr{ + Cidrs: []*string{ + to.Ptr("1.0.0.1/10")}, + Countries: []*string{ + to.Ptr("India")}, + Feeds: []*string{ + to.Ptr("feed")}, + FqdnLists: []*string{ + to.Ptr("FQDN1")}, + PrefixLists: []*string{ + to.Ptr("PL1")}, + }, + EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + Etag: to.Ptr("c18e6eef-ba3e-49ee-8a85-2b36c863a9d0"), + InboundInspectionCertificate: to.Ptr("cert1"), + NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + 
NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + ProtocolPortList: []*string{ + to.Ptr("80")}, + ProvisioningState: to.Ptr(armpanngfw.ProvisioningStateAccepted), + RuleName: to.Ptr("preRule1"), + RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + Source: &armpanngfw.SourceAddr{ + Cidrs: []*string{ + to.Ptr("1.0.0.1/10")}, + Countries: []*string{ + to.Ptr("India")}, + Feeds: []*string{ + to.Ptr("feed")}, + PrefixLists: []*string{ + to.Ptr("PL1")}, + }, + Tags: []*armpanngfw.TagInfo{ + { + Key: to.Ptr("keyName"), + Value: to.Ptr("value"), + }}, + Protocol: to.Ptr("HTTP"), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PreRulesResource = armpanngfw.PreRulesResource{ + // Name: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // Type: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ID: to.Ptr("aaaaaaaaaaaaaaaaaaaaaa"), + // SystemData: &armpanngfw.SystemData{ + // CreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // CreatedBy: to.Ptr("praval"), + // CreatedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // LastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // LastModifiedBy: to.Ptr("praval"), + // LastModifiedByType: to.Ptr(armpanngfw.CreatedByTypeUser), + // }, + // Properties: &armpanngfw.RuleEntry{ + // Description: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // ActionType: to.Ptr(armpanngfw.ActionEnumAllow), + // Applications: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // AuditComment: to.Ptr("aaa"), + // Category: &armpanngfw.Category{ + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // URLCustom: []*string{ + // to.Ptr("aaaaa")}, + // }, + // DecryptionRuleType: to.Ptr(armpanngfw.DecryptionRuleTypeEnumSSLOutboundInspection), + // Destination: &armpanngfw.DestinationAddr{ + // Cidrs: []*string{ + // to.Ptr("aaaaaaa")}, + // Countries: []*string{ + // to.Ptr("aaaaaaaaaaaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // FqdnLists: []*string{ + // to.Ptr("aaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")}, + // }, + // EnableLogging: to.Ptr(armpanngfw.StateEnumDISABLED), + // Etag: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // InboundInspectionCertificate: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // NegateDestination: to.Ptr(armpanngfw.BooleanEnumTRUE), + // NegateSource: to.Ptr(armpanngfw.BooleanEnumTRUE), + // Priority: to.Ptr[int32](24), + // ProtocolPortList: []*string{ + // to.Ptr("aaaaaaaaaaaa")}, + // ProvisioningState: 
to.Ptr(armpanngfw.ProvisioningStateSucceeded), + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // RuleState: to.Ptr(armpanngfw.StateEnumDISABLED), + // Source: &armpanngfw.SourceAddr{ + // Cidrs: []*string{ + // to.Ptr("aaa")}, + // Countries: []*string{ + // to.Ptr("aaaaa")}, + // Feeds: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaa")}, + // PrefixLists: []*string{ + // to.Ptr("aaaaaaaaaaaaaaaaaaaa")}, + // }, + // Tags: []*armpanngfw.TagInfo{ + // { + // Key: to.Ptr("keyName"), + // Value: to.Ptr("value"), + // }}, + // Protocol: to.Ptr("aaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_CreateOrUpdate_MinimumSet_Gen.json +func ExamplePreRulesClient_BeginCreateOrUpdate_preRulesCreateOrUpdateMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPreRulesClient().BeginCreateOrUpdate(ctx, "lrs1", "1", armpanngfw.PreRulesResource{ + Properties: &armpanngfw.RuleEntry{ + RuleName: to.Ptr("preRule1"), + }, + }, nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + res, err := poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.PreRulesResource = armpanngfw.PreRulesResource{ + // ID: to.Ptr("/providers/PaloAltoNetworks.Cloudngfw/globalRulestacks/lrs1/prerules/1"), + // Properties: &armpanngfw.RuleEntry{ + // RuleName: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaa"), + // }, + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_Delete_MaximumSet_Gen.json +func ExamplePreRulesClient_BeginDelete_preRulesDeleteMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPreRulesClient().BeginDelete(ctx, "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_Delete_MinimumSet_Gen.json +func ExamplePreRulesClient_BeginDelete_preRulesDeleteMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + poller, err := clientFactory.NewPreRulesClient().BeginDelete(ctx, "lrs1", "1", nil) + if err != nil { + log.Fatalf("failed to finish 
the request: %v", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + log.Fatalf("failed to pull the result: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_getCounters_MaximumSet_Gen.json +func ExamplePreRulesClient_GetCounters_preRulesGetCountersMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPreRulesClient().GetCounters(ctx, "lrs1", "1", &armpanngfw.PreRulesClientGetCountersOptions{FirewallName: to.Ptr("firewall1")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.RuleCounter = armpanngfw.RuleCounter{ + // AppSeen: &armpanngfw.AppSeenData{ + // AppSeenList: []*armpanngfw.AppSeenInfo{ + // { + // Category: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // Risk: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaa"), + // StandardPorts: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // SubCategory: to.Ptr("aaaaaaaaaaaaaaaaa"), + // Tag: to.Ptr("aaaaaaaaaa"), + // Technology: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // Title: to.Ptr("aaaaaaaaaaaaaaaaaaaaaaaaaaaa"), + // }}, + // Count: to.Ptr[int32](13), + // }, + // FirewallName: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // HitCount: to.Ptr[int32](20), + // LastUpdatedTimestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // Priority: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // RequestTimestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // RuleListName: to.Ptr("aaaaaaaaaaaaaaaaaaa"), + // RuleName: to.Ptr("aaaa"), + // RuleStackName: to.Ptr("aaaaaaaaaaaaaaaaa"), + // Timestamp: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, "2022-09-09T05:08:24.229Z"); return t}()), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_getCounters_MinimumSet_Gen.json +func ExamplePreRulesClient_GetCounters_preRulesGetCountersMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPreRulesClient().GetCounters(ctx, "lrs1", "1", &armpanngfw.PreRulesClientGetCountersOptions{FirewallName: nil}) + if err != nil { + 
log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.RuleCounter = armpanngfw.RuleCounter{ + // Priority: to.Ptr("aaaaaaaaaaaaaaaaaaaa"), + // RuleName: to.Ptr("aaaa"), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_refreshCounters_MaximumSet_Gen.json +func ExamplePreRulesClient_RefreshCounters_preRulesRefreshCountersMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewPreRulesClient().RefreshCounters(ctx, "lrs1", "1", &armpanngfw.PreRulesClientRefreshCountersOptions{FirewallName: to.Ptr("firewall1")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_refreshCounters_MinimumSet_Gen.json +func ExamplePreRulesClient_RefreshCounters_preRulesRefreshCountersMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := 
armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + _, err = clientFactory.NewPreRulesClient().RefreshCounters(ctx, "lrs1", "1", &armpanngfw.PreRulesClientRefreshCountersOptions{FirewallName: nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_resetCounters_MaximumSet_Gen.json +func ExamplePreRulesClient_ResetCounters_preRulesResetCountersMaximumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPreRulesClient().ResetCounters(ctx, "lrs1", "1", &armpanngfw.PreRulesClientResetCountersOptions{FirewallName: to.Ptr("firewall1")}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. 
+ // res.RuleCounterReset = armpanngfw.RuleCounterReset{ + // FirewallName: to.Ptr("aaaaaaaaaaaaaaaaaa"), + // Priority: to.Ptr("aaaaaaa"), + // RuleListName: to.Ptr("aaaaa"), + // RuleName: to.Ptr("aaaaa"), + // RuleStackName: to.Ptr("aa"), + // } +} + +// Generated from example definition: https://github.com/Azure/azure-rest-api-specs/blob/fdf43f2fdacf17bd78c0621df44a5c024b61db82/specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/PreRules_resetCounters_MinimumSet_Gen.json +func ExamplePreRulesClient_ResetCounters_preRulesResetCountersMinimumSetGen() { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Fatalf("failed to obtain a credential: %v", err) + } + ctx := context.Background() + clientFactory, err := armpanngfw.NewClientFactory("", cred, nil) + if err != nil { + log.Fatalf("failed to create client: %v", err) + } + res, err := clientFactory.NewPreRulesClient().ResetCounters(ctx, "lrs1", "1", &armpanngfw.PreRulesClientResetCountersOptions{FirewallName: nil}) + if err != nil { + log.Fatalf("failed to finish the request: %v", err) + } + // You could use response here. We use blank identifier for just demo purposes. + _ = res + // If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes. + // res.RuleCounterReset = armpanngfw.RuleCounterReset{ + // } +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/response_types.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/response_types.go new file mode 100644 index 000000000000..45565bcd1f6d --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/response_types.go @@ -0,0 +1,450 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +// CertificateObjectGlobalRulestackClientCreateOrUpdateResponse contains the response from method CertificateObjectGlobalRulestackClient.BeginCreateOrUpdate. +type CertificateObjectGlobalRulestackClientCreateOrUpdateResponse struct { + CertificateObjectGlobalRulestackResource +} + +// CertificateObjectGlobalRulestackClientDeleteResponse contains the response from method CertificateObjectGlobalRulestackClient.BeginDelete. +type CertificateObjectGlobalRulestackClientDeleteResponse struct { + // placeholder for future response values +} + +// CertificateObjectGlobalRulestackClientGetResponse contains the response from method CertificateObjectGlobalRulestackClient.Get. +type CertificateObjectGlobalRulestackClientGetResponse struct { + CertificateObjectGlobalRulestackResource +} + +// CertificateObjectGlobalRulestackClientListResponse contains the response from method CertificateObjectGlobalRulestackClient.NewListPager. +type CertificateObjectGlobalRulestackClientListResponse struct { + CertificateObjectGlobalRulestackResourceListResult +} + +// CertificateObjectLocalRulestackClientCreateOrUpdateResponse contains the response from method CertificateObjectLocalRulestackClient.BeginCreateOrUpdate. +type CertificateObjectLocalRulestackClientCreateOrUpdateResponse struct { + CertificateObjectLocalRulestackResource +} + +// CertificateObjectLocalRulestackClientDeleteResponse contains the response from method CertificateObjectLocalRulestackClient.BeginDelete. +type CertificateObjectLocalRulestackClientDeleteResponse struct { + // placeholder for future response values +} + +// CertificateObjectLocalRulestackClientGetResponse contains the response from method CertificateObjectLocalRulestackClient.Get. 
+type CertificateObjectLocalRulestackClientGetResponse struct { + CertificateObjectLocalRulestackResource +} + +// CertificateObjectLocalRulestackClientListByLocalRulestacksResponse contains the response from method CertificateObjectLocalRulestackClient.NewListByLocalRulestacksPager. +type CertificateObjectLocalRulestackClientListByLocalRulestacksResponse struct { + CertificateObjectLocalRulestackResourceListResult +} + +// FirewallStatusClientGetResponse contains the response from method FirewallStatusClient.Get. +type FirewallStatusClientGetResponse struct { + FirewallStatusResource +} + +// FirewallStatusClientListByFirewallsResponse contains the response from method FirewallStatusClient.NewListByFirewallsPager. +type FirewallStatusClientListByFirewallsResponse struct { + FirewallStatusResourceListResult +} + +// FirewallsClientCreateOrUpdateResponse contains the response from method FirewallsClient.BeginCreateOrUpdate. +type FirewallsClientCreateOrUpdateResponse struct { + FirewallResource +} + +// FirewallsClientDeleteResponse contains the response from method FirewallsClient.BeginDelete. +type FirewallsClientDeleteResponse struct { + // placeholder for future response values +} + +// FirewallsClientGetGlobalRulestackResponse contains the response from method FirewallsClient.GetGlobalRulestack. +type FirewallsClientGetGlobalRulestackResponse struct { + GlobalRulestackInfo +} + +// FirewallsClientGetLogProfileResponse contains the response from method FirewallsClient.GetLogProfile. +type FirewallsClientGetLogProfileResponse struct { + LogSettings +} + +// FirewallsClientGetResponse contains the response from method FirewallsClient.Get. +type FirewallsClientGetResponse struct { + FirewallResource +} + +// FirewallsClientGetSupportInfoResponse contains the response from method FirewallsClient.GetSupportInfo. 
+type FirewallsClientGetSupportInfoResponse struct { + SupportInfo +} + +// FirewallsClientListByResourceGroupResponse contains the response from method FirewallsClient.NewListByResourceGroupPager. +type FirewallsClientListByResourceGroupResponse struct { + FirewallResourceListResult +} + +// FirewallsClientListBySubscriptionResponse contains the response from method FirewallsClient.NewListBySubscriptionPager. +type FirewallsClientListBySubscriptionResponse struct { + FirewallResourceListResult +} + +// FirewallsClientSaveLogProfileResponse contains the response from method FirewallsClient.SaveLogProfile. +type FirewallsClientSaveLogProfileResponse struct { + // placeholder for future response values +} + +// FirewallsClientUpdateResponse contains the response from method FirewallsClient.Update. +type FirewallsClientUpdateResponse struct { + FirewallResource +} + +// FqdnListGlobalRulestackClientCreateOrUpdateResponse contains the response from method FqdnListGlobalRulestackClient.BeginCreateOrUpdate. +type FqdnListGlobalRulestackClientCreateOrUpdateResponse struct { + FqdnListGlobalRulestackResource +} + +// FqdnListGlobalRulestackClientDeleteResponse contains the response from method FqdnListGlobalRulestackClient.BeginDelete. +type FqdnListGlobalRulestackClientDeleteResponse struct { + // placeholder for future response values +} + +// FqdnListGlobalRulestackClientGetResponse contains the response from method FqdnListGlobalRulestackClient.Get. +type FqdnListGlobalRulestackClientGetResponse struct { + FqdnListGlobalRulestackResource +} + +// FqdnListGlobalRulestackClientListResponse contains the response from method FqdnListGlobalRulestackClient.NewListPager. +type FqdnListGlobalRulestackClientListResponse struct { + FqdnListGlobalRulestackResourceListResult +} + +// FqdnListLocalRulestackClientCreateOrUpdateResponse contains the response from method FqdnListLocalRulestackClient.BeginCreateOrUpdate. 
+type FqdnListLocalRulestackClientCreateOrUpdateResponse struct { + FqdnListLocalRulestackResource +} + +// FqdnListLocalRulestackClientDeleteResponse contains the response from method FqdnListLocalRulestackClient.BeginDelete. +type FqdnListLocalRulestackClientDeleteResponse struct { + // placeholder for future response values +} + +// FqdnListLocalRulestackClientGetResponse contains the response from method FqdnListLocalRulestackClient.Get. +type FqdnListLocalRulestackClientGetResponse struct { + FqdnListLocalRulestackResource +} + +// FqdnListLocalRulestackClientListByLocalRulestacksResponse contains the response from method FqdnListLocalRulestackClient.NewListByLocalRulestacksPager. +type FqdnListLocalRulestackClientListByLocalRulestacksResponse struct { + FqdnListLocalRulestackResourceListResult +} + +// GlobalRulestackClientCommitResponse contains the response from method GlobalRulestackClient.BeginCommit. +type GlobalRulestackClientCommitResponse struct { + // placeholder for future response values +} + +// GlobalRulestackClientCreateOrUpdateResponse contains the response from method GlobalRulestackClient.BeginCreateOrUpdate. +type GlobalRulestackClientCreateOrUpdateResponse struct { + GlobalRulestackResource +} + +// GlobalRulestackClientDeleteResponse contains the response from method GlobalRulestackClient.BeginDelete. +type GlobalRulestackClientDeleteResponse struct { + // placeholder for future response values +} + +// GlobalRulestackClientGetChangeLogResponse contains the response from method GlobalRulestackClient.GetChangeLog. +type GlobalRulestackClientGetChangeLogResponse struct { + Changelog +} + +// GlobalRulestackClientGetResponse contains the response from method GlobalRulestackClient.Get. +type GlobalRulestackClientGetResponse struct { + GlobalRulestackResource +} + +// GlobalRulestackClientListAdvancedSecurityObjectsResponse contains the response from method GlobalRulestackClient.ListAdvancedSecurityObjects. 
+type GlobalRulestackClientListAdvancedSecurityObjectsResponse struct { + AdvSecurityObjectListResponse +} + +// GlobalRulestackClientListAppIDsResponse contains the response from method GlobalRulestackClient.ListAppIDs. +type GlobalRulestackClientListAppIDsResponse struct { + ListAppIDResponse +} + +// GlobalRulestackClientListCountriesResponse contains the response from method GlobalRulestackClient.ListCountries. +type GlobalRulestackClientListCountriesResponse struct { + CountriesResponse +} + +// GlobalRulestackClientListFirewallsResponse contains the response from method GlobalRulestackClient.ListFirewalls. +type GlobalRulestackClientListFirewallsResponse struct { + ListFirewallsResponse +} + +// GlobalRulestackClientListPredefinedURLCategoriesResponse contains the response from method GlobalRulestackClient.ListPredefinedURLCategories. +type GlobalRulestackClientListPredefinedURLCategoriesResponse struct { + PredefinedURLCategoriesResponse +} + +// GlobalRulestackClientListResponse contains the response from method GlobalRulestackClient.NewListPager. +type GlobalRulestackClientListResponse struct { + GlobalRulestackResourceListResult +} + +// GlobalRulestackClientListSecurityServicesResponse contains the response from method GlobalRulestackClient.ListSecurityServices. +type GlobalRulestackClientListSecurityServicesResponse struct { + SecurityServicesResponse +} + +// GlobalRulestackClientRevertResponse contains the response from method GlobalRulestackClient.Revert. +type GlobalRulestackClientRevertResponse struct { + // placeholder for future response values +} + +// GlobalRulestackClientUpdateResponse contains the response from method GlobalRulestackClient.Update. +type GlobalRulestackClientUpdateResponse struct { + GlobalRulestackResource +} + +// LocalRulesClientCreateOrUpdateResponse contains the response from method LocalRulesClient.BeginCreateOrUpdate. 
+type LocalRulesClientCreateOrUpdateResponse struct { + LocalRulesResource +} + +// LocalRulesClientDeleteResponse contains the response from method LocalRulesClient.BeginDelete. +type LocalRulesClientDeleteResponse struct { + // placeholder for future response values +} + +// LocalRulesClientGetCountersResponse contains the response from method LocalRulesClient.GetCounters. +type LocalRulesClientGetCountersResponse struct { + RuleCounter +} + +// LocalRulesClientGetResponse contains the response from method LocalRulesClient.Get. +type LocalRulesClientGetResponse struct { + LocalRulesResource +} + +// LocalRulesClientListByLocalRulestacksResponse contains the response from method LocalRulesClient.NewListByLocalRulestacksPager. +type LocalRulesClientListByLocalRulestacksResponse struct { + LocalRulesResourceListResult +} + +// LocalRulesClientRefreshCountersResponse contains the response from method LocalRulesClient.RefreshCounters. +type LocalRulesClientRefreshCountersResponse struct { + // placeholder for future response values +} + +// LocalRulesClientResetCountersResponse contains the response from method LocalRulesClient.ResetCounters. +type LocalRulesClientResetCountersResponse struct { + RuleCounterReset +} + +// LocalRulestacksClientCommitResponse contains the response from method LocalRulestacksClient.BeginCommit. +type LocalRulestacksClientCommitResponse struct { + // placeholder for future response values +} + +// LocalRulestacksClientCreateOrUpdateResponse contains the response from method LocalRulestacksClient.BeginCreateOrUpdate. +type LocalRulestacksClientCreateOrUpdateResponse struct { + LocalRulestackResource +} + +// LocalRulestacksClientDeleteResponse contains the response from method LocalRulestacksClient.BeginDelete. +type LocalRulestacksClientDeleteResponse struct { + // placeholder for future response values +} + +// LocalRulestacksClientGetChangeLogResponse contains the response from method LocalRulestacksClient.GetChangeLog. 
+type LocalRulestacksClientGetChangeLogResponse struct { + Changelog +} + +// LocalRulestacksClientGetResponse contains the response from method LocalRulestacksClient.Get. +type LocalRulestacksClientGetResponse struct { + LocalRulestackResource +} + +// LocalRulestacksClientGetSupportInfoResponse contains the response from method LocalRulestacksClient.GetSupportInfo. +type LocalRulestacksClientGetSupportInfoResponse struct { + SupportInfo +} + +// LocalRulestacksClientListAdvancedSecurityObjectsResponse contains the response from method LocalRulestacksClient.ListAdvancedSecurityObjects. +type LocalRulestacksClientListAdvancedSecurityObjectsResponse struct { + AdvSecurityObjectListResponse +} + +// LocalRulestacksClientListAppIDsResponse contains the response from method LocalRulestacksClient.ListAppIDs. +type LocalRulestacksClientListAppIDsResponse struct { + ListAppIDResponse +} + +// LocalRulestacksClientListByResourceGroupResponse contains the response from method LocalRulestacksClient.NewListByResourceGroupPager. +type LocalRulestacksClientListByResourceGroupResponse struct { + LocalRulestackResourceListResult +} + +// LocalRulestacksClientListBySubscriptionResponse contains the response from method LocalRulestacksClient.NewListBySubscriptionPager. +type LocalRulestacksClientListBySubscriptionResponse struct { + LocalRulestackResourceListResult +} + +// LocalRulestacksClientListCountriesResponse contains the response from method LocalRulestacksClient.ListCountries. +type LocalRulestacksClientListCountriesResponse struct { + CountriesResponse +} + +// LocalRulestacksClientListFirewallsResponse contains the response from method LocalRulestacksClient.ListFirewalls. +type LocalRulestacksClientListFirewallsResponse struct { + ListFirewallsResponse +} + +// LocalRulestacksClientListPredefinedURLCategoriesResponse contains the response from method LocalRulestacksClient.ListPredefinedURLCategories. 
+type LocalRulestacksClientListPredefinedURLCategoriesResponse struct { + PredefinedURLCategoriesResponse +} + +// LocalRulestacksClientListSecurityServicesResponse contains the response from method LocalRulestacksClient.ListSecurityServices. +type LocalRulestacksClientListSecurityServicesResponse struct { + SecurityServicesResponse +} + +// LocalRulestacksClientRevertResponse contains the response from method LocalRulestacksClient.Revert. +type LocalRulestacksClientRevertResponse struct { + // placeholder for future response values +} + +// LocalRulestacksClientUpdateResponse contains the response from method LocalRulestacksClient.Update. +type LocalRulestacksClientUpdateResponse struct { + LocalRulestackResource +} + +// OperationsClientListResponse contains the response from method OperationsClient.NewListPager. +type OperationsClientListResponse struct { + OperationListResult +} + +// PostRulesClientCreateOrUpdateResponse contains the response from method PostRulesClient.BeginCreateOrUpdate. +type PostRulesClientCreateOrUpdateResponse struct { + PostRulesResource +} + +// PostRulesClientDeleteResponse contains the response from method PostRulesClient.BeginDelete. +type PostRulesClientDeleteResponse struct { + // placeholder for future response values +} + +// PostRulesClientGetCountersResponse contains the response from method PostRulesClient.GetCounters. +type PostRulesClientGetCountersResponse struct { + RuleCounter +} + +// PostRulesClientGetResponse contains the response from method PostRulesClient.Get. +type PostRulesClientGetResponse struct { + PostRulesResource +} + +// PostRulesClientListResponse contains the response from method PostRulesClient.NewListPager. +type PostRulesClientListResponse struct { + PostRulesResourceListResult +} + +// PostRulesClientRefreshCountersResponse contains the response from method PostRulesClient.RefreshCounters. 
+type PostRulesClientRefreshCountersResponse struct { + // placeholder for future response values +} + +// PostRulesClientResetCountersResponse contains the response from method PostRulesClient.ResetCounters. +type PostRulesClientResetCountersResponse struct { + RuleCounterReset +} + +// PreRulesClientCreateOrUpdateResponse contains the response from method PreRulesClient.BeginCreateOrUpdate. +type PreRulesClientCreateOrUpdateResponse struct { + PreRulesResource +} + +// PreRulesClientDeleteResponse contains the response from method PreRulesClient.BeginDelete. +type PreRulesClientDeleteResponse struct { + // placeholder for future response values +} + +// PreRulesClientGetCountersResponse contains the response from method PreRulesClient.GetCounters. +type PreRulesClientGetCountersResponse struct { + RuleCounter +} + +// PreRulesClientGetResponse contains the response from method PreRulesClient.Get. +type PreRulesClientGetResponse struct { + PreRulesResource +} + +// PreRulesClientListResponse contains the response from method PreRulesClient.NewListPager. +type PreRulesClientListResponse struct { + PreRulesResourceListResult +} + +// PreRulesClientRefreshCountersResponse contains the response from method PreRulesClient.RefreshCounters. +type PreRulesClientRefreshCountersResponse struct { + // placeholder for future response values +} + +// PreRulesClientResetCountersResponse contains the response from method PreRulesClient.ResetCounters. +type PreRulesClientResetCountersResponse struct { + RuleCounterReset +} + +// PrefixListGlobalRulestackClientCreateOrUpdateResponse contains the response from method PrefixListGlobalRulestackClient.BeginCreateOrUpdate. +type PrefixListGlobalRulestackClientCreateOrUpdateResponse struct { + PrefixListGlobalRulestackResource +} + +// PrefixListGlobalRulestackClientDeleteResponse contains the response from method PrefixListGlobalRulestackClient.BeginDelete. 
+type PrefixListGlobalRulestackClientDeleteResponse struct { + // placeholder for future response values +} + +// PrefixListGlobalRulestackClientGetResponse contains the response from method PrefixListGlobalRulestackClient.Get. +type PrefixListGlobalRulestackClientGetResponse struct { + PrefixListGlobalRulestackResource +} + +// PrefixListGlobalRulestackClientListResponse contains the response from method PrefixListGlobalRulestackClient.NewListPager. +type PrefixListGlobalRulestackClientListResponse struct { + PrefixListGlobalRulestackResourceListResult +} + +// PrefixListLocalRulestackClientCreateOrUpdateResponse contains the response from method PrefixListLocalRulestackClient.BeginCreateOrUpdate. +type PrefixListLocalRulestackClientCreateOrUpdateResponse struct { + PrefixListResource +} + +// PrefixListLocalRulestackClientDeleteResponse contains the response from method PrefixListLocalRulestackClient.BeginDelete. +type PrefixListLocalRulestackClientDeleteResponse struct { + // placeholder for future response values +} + +// PrefixListLocalRulestackClientGetResponse contains the response from method PrefixListLocalRulestackClient.Get. +type PrefixListLocalRulestackClientGetResponse struct { + PrefixListResource +} + +// PrefixListLocalRulestackClientListByLocalRulestacksResponse contains the response from method PrefixListLocalRulestackClient.NewListByLocalRulestacksPager. +type PrefixListLocalRulestackClientListByLocalRulestacksResponse struct { + PrefixListResourceListResult +} diff --git a/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/time_rfc3339.go b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/time_rfc3339.go new file mode 100644 index 000000000000..b37eb2234926 --- /dev/null +++ b/sdk/resourcemanager/paloaltonetworksngfw/armpanngfw/time_rfc3339.go @@ -0,0 +1,87 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package armpanngfw + +import ( + "encoding/json" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "regexp" + "strings" + "time" +) + +const ( + utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` + utcLayout = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +type timeRFC3339 time.Time + +func (t timeRFC3339) MarshalJSON() (json []byte, err error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t timeRFC3339) MarshalText() (text []byte, err error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *timeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcLayoutJSON + if tzOffsetRegex.Match(data) { + layout = rfc3339JSON + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + layout := utcLayout + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = timeRFC3339(p) + return err +} + +func populateTimeRFC3339(m map[string]any, k string, t *time.Time) { + if t == nil { + return + } else if azcore.IsNullValue(t) { + m[k] = nil + return + } else if reflect.ValueOf(t).IsNil() { + return + } + m[k] = (*timeRFC3339)(t) +} + +func unpopulateTimeRFC3339(data json.RawMessage, fn string, t **time.Time) error { + if data == nil || strings.EqualFold(string(data), "null") { + return nil + } + var aux timeRFC3339 + if err := json.Unmarshal(data, &aux); err != nil { + return 
fmt.Errorf("struct field %s: %v", fn, err) + } + *t = (*time.Time)(&aux) + return nil +} From 2a8d96d355a38886b1bdf7b99d029b7f57108ee0 Mon Sep 17 00:00:00 2001 From: Peng Jiahui <46921893+Alancere@users.noreply.github.com> Date: Thu, 4 May 2023 14:32:24 +0800 Subject: [PATCH 14/50] add sdk/resourcemanager/postgresql/armpostgresql live test (#20685) * add sdk/resourcemanager/postgresql/armpostgresql live test * update assets.json * set subscriptionId default value * format --- .../postgresql/armpostgresql/assets.json | 2 +- .../armpostgresql/postgresql_live_test.go | 191 ++++++------ .../privateendpointconnections_live_test.go | 271 ++++++++++++++++++ .../serversecurityalertpolicies_live_test.go | 126 ++++++++ 4 files changed, 492 insertions(+), 98 deletions(-) create mode 100644 sdk/resourcemanager/postgresql/armpostgresql/privateendpointconnections_live_test.go create mode 100644 sdk/resourcemanager/postgresql/armpostgresql/serversecurityalertpolicies_live_test.go diff --git a/sdk/resourcemanager/postgresql/armpostgresql/assets.json b/sdk/resourcemanager/postgresql/armpostgresql/assets.json index 0a673e5602dd..c7e1e87d6349 100644 --- a/sdk/resourcemanager/postgresql/armpostgresql/assets.json +++ b/sdk/resourcemanager/postgresql/armpostgresql/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/postgresql/armpostgresql", - "Tag": "go/resourcemanager/postgresql/armpostgresql_d9bc2d0da1" + "Tag": "go/resourcemanager/postgresql/armpostgresql_2a306718c0" } diff --git a/sdk/resourcemanager/postgresql/armpostgresql/postgresql_live_test.go b/sdk/resourcemanager/postgresql/armpostgresql/postgresql_live_test.go index 7bf657fd16b5..64c3c42c4a63 100644 --- a/sdk/resourcemanager/postgresql/armpostgresql/postgresql_live_test.go +++ b/sdk/resourcemanager/postgresql/armpostgresql/postgresql_live_test.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresql" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" @@ -35,14 +36,14 @@ type PostgresqlTestSuite struct { func (testsuite *PostgresqlTestSuite) SetupSuite() { testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/postgresql/armpostgresql/testdata") + testsuite.ctx = context.Background() testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) - testsuite.serverName = testutil.GenerateAlphaNumericID(testsuite.T(), "pgservers", 6) - testsuite.adminPassword = testutil.GetEnv("ADMIN_PASSWORD", "000000000000") - testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.serverName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "serverna", 14, true) + testsuite.adminPassword = testutil.GetEnv("ADMIN_PASSWORD", "") + testsuite.location = testutil.GetEnv("LOCATION", "westus") testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) testsuite.Require().NoError(err) testsuite.resourceGroupName = *resourceGroup.Name @@ -81,8 +82,8 @@ func (testsuite *PostgresqlTestSuite) Prepare() { AdministratorLoginPassword: to.Ptr(testsuite.adminPassword), }, SKU: &armpostgresql.SKU{ - Family: to.Ptr("Gen5"), Name: to.Ptr("GP_Gen5_8"), + Family: to.Ptr("Gen5"), Tier: to.Ptr(armpostgresql.SKUTierGeneralPurpose), }, Tags: map[string]*string{ @@ -94,7 +95,7 @@ func 
(testsuite *PostgresqlTestSuite) Prepare() { testsuite.Require().NoError(err) } -// Microsoft.DBforPostgreSQL/servers +// Microsoft.DBforPostgreSQL/servers/{serverName} func (testsuite *PostgresqlTestSuite) TestServers() { var err error // From step Servers_List @@ -143,23 +144,8 @@ func (testsuite *PostgresqlTestSuite) TestServers() { testsuite.Require().NoError(err) } -// Microsoft.DBforPostgreSQL/servers/replicas -func (testsuite *PostgresqlTestSuite) TestReplicas() { - var err error - // From step Replicas_ListByServer - fmt.Println("Call operation: Replicas_ListByServer") - replicasClient, err := armpostgresql.NewReplicasClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - replicasClientNewListByServerPager := replicasClient.NewListByServerPager(testsuite.resourceGroupName, testsuite.serverName, nil) - for replicasClientNewListByServerPager.More() { - _, err := replicasClientNewListByServerPager.NextPage(testsuite.ctx) - testsuite.Require().NoError(err) - break - } -} - -// Microsoft.DBforPostgreSQL/servers/firewallRules -func (testsuite *PostgresqlTestSuite) TestFirewallrules() { +// Microsoft.DBforPostgreSQL/servers/{serverName}/firewallRules/{firewallRuleName} +func (testsuite *PostgresqlTestSuite) TestFirewallRules() { var err error // From step FirewallRules_CreateOrUpdate fmt.Println("Call operation: FirewallRules_CreateOrUpdate") @@ -197,67 +183,63 @@ func (testsuite *PostgresqlTestSuite) TestFirewallrules() { testsuite.Require().NoError(err) } -// Microsoft.DBforPostgreSQL/servers/virtualNetworkRules -func (testsuite *PostgresqlTestSuite) TestVirtualnetworkrules() { +// Microsoft.DBforPostgreSQL/servers/{serverName}/virtualNetworkRules/{virtualNetworkRuleName} +func (testsuite *PostgresqlTestSuite) TestVirtualNetworkRules() { var subnetId string var err error // From step VirtualNetwork_Create - template := map[string]interface{}{ + template := map[string]any{ "$schema": 
"https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", - "outputs": map[string]interface{}{ - "subnetId": map[string]interface{}{ + "outputs": map[string]any{ + "subnetId": map[string]any{ "type": "string", "value": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), parameters('subnetName'))]", }, }, - "parameters": map[string]interface{}{ - "location": map[string]interface{}{ + "parameters": map[string]any{ + "location": map[string]any{ "type": "string", - "defaultValue": "$(location)", + "defaultValue": testsuite.location, }, - "subnetName": map[string]interface{}{ + "subnetName": map[string]any{ "type": "string", "defaultValue": "pgsubnet", }, - "virtualNetworksName": map[string]interface{}{ + "virtualNetworksName": map[string]any{ "type": "string", "defaultValue": "pgvnet", }, }, - "resources": []interface{}{ - map[string]interface{}{ + "resources": []any{ + map[string]any{ "name": "[parameters('virtualNetworksName')]", "type": "Microsoft.Network/virtualNetworks", "apiVersion": "2021-05-01", "location": "[parameters('location')]", - "properties": map[string]interface{}{ - "addressSpace": map[string]interface{}{ - "addressPrefixes": []interface{}{ + "properties": map[string]any{ + "addressSpace": map[string]any{ + "addressPrefixes": []any{ "10.0.0.0/16", }, }, - "subnets": []interface{}{ - map[string]interface{}{ + "subnets": []any{ + map[string]any{ "name": "[parameters('subnetName')]", - "properties": map[string]interface{}{ + "properties": map[string]any{ "addressPrefix": "10.0.0.0/24", }, }, }, }, - "tags": map[string]interface{}{}, + "tags": map[string]any{}, }, }, } - params := map[string]interface{}{ - "location": map[string]interface{}{"value": testsuite.location}, - } deployment := armresources.Deployment{ Properties: &armresources.DeploymentProperties{ - Template: template, - Parameters: params, - Mode: to.Ptr(armresources.DeploymentModeIncremental), + 
Template: template, + Mode: to.Ptr(armresources.DeploymentModeIncremental), }, } deploymentExtend, err := testutil.CreateDeployment(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName, "VirtualNetwork_Create", &deployment) @@ -300,7 +282,7 @@ func (testsuite *PostgresqlTestSuite) TestVirtualnetworkrules() { testsuite.Require().NoError(err) } -// Microsoft.DBforPostgreSQL/servers/databases +// Microsoft.DBforPostgreSQL/servers/{serverName}/databases/{databaseName} func (testsuite *PostgresqlTestSuite) TestDatabases() { var err error // From step Databases_CreateOrUpdate @@ -339,7 +321,7 @@ func (testsuite *PostgresqlTestSuite) TestDatabases() { testsuite.Require().NoError(err) } -// Microsoft.DBforPostgreSQL/servers/configurations +// Microsoft.DBforPostgreSQL/servers/{serverName}/configurations/{configurationName} func (testsuite *PostgresqlTestSuite) TestConfigurations() { var err error // From step Configurations_CreateOrUpdate @@ -371,8 +353,64 @@ func (testsuite *PostgresqlTestSuite) TestConfigurations() { testsuite.Require().NoError(err) } +// Microsoft.DBforPostgreSQL/servers/{serverName}/administrators/activeDirectory +func (testsuite *PostgresqlTestSuite) TestServerAdministrators() { + var err error + // From step ServerAdministrators_CreateOrUpdate + fmt.Println("Call operation: ServerAdministrators_CreateOrUpdate") + serverAdministratorsClient, err := armpostgresql.NewServerAdministratorsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serverAdministratorsClientCreateOrUpdateResponsePoller, err := serverAdministratorsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, armpostgresql.ServerAdministratorResource{ + Properties: &armpostgresql.ServerAdministratorProperties{ + AdministratorType: to.Ptr("ActiveDirectory"), + Login: to.Ptr("bob@contoso.com"), + Sid: to.Ptr("c6b82b90-a647-49cb-8a62-0d2d3cb7ac7c"), + 
TenantID: to.Ptr("c6b82b90-a647-49cb-8a62-0d2d3cb7ac7c"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serverAdministratorsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step ServerAdministrators_List + fmt.Println("Call operation: ServerAdministrators_List") + serverAdministratorsClientNewListPager := serverAdministratorsClient.NewListPager(testsuite.resourceGroupName, testsuite.serverName, nil) + for serverAdministratorsClientNewListPager.More() { + _, err := serverAdministratorsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ServerAdministrators_Get + fmt.Println("Call operation: ServerAdministrators_Get") + _, err = serverAdministratorsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, nil) + testsuite.Require().NoError(err) + + // From step ServerAdministrators_Delete + fmt.Println("Call operation: ServerAdministrators_Delete") + serverAdministratorsClientDeleteResponsePoller, err := serverAdministratorsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serverAdministratorsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DBforPostgreSQL/servers/replicas +func (testsuite *PostgresqlTestSuite) TestReplicas() { + var err error + // From step Replicas_ListByServer + fmt.Println("Call operation: Replicas_ListByServer") + replicasClient, err := armpostgresql.NewReplicasClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + replicasClientNewListByServerPager := replicasClient.NewListByServerPager(testsuite.resourceGroupName, testsuite.serverName, nil) + for replicasClientNewListByServerPager.More() { + _, err := replicasClientNewListByServerPager.NextPage(testsuite.ctx) + 
testsuite.Require().NoError(err) + break + } +} + // Microsoft.DBforPostgreSQL/servers/updateConfigurations -func (testsuite *PostgresqlTestSuite) TestServerparameters() { +func (testsuite *PostgresqlTestSuite) TestServerParameters() { var err error // From step ServerParameters_ListUpdateConfigurations fmt.Println("Call operation: ServerParameters_ListUpdateConfigurations") @@ -399,7 +437,7 @@ func (testsuite *PostgresqlTestSuite) TestServerparameters() { } // Microsoft.DBforPostgreSQL/servers/logFiles -func (testsuite *PostgresqlTestSuite) TestLogfiles() { +func (testsuite *PostgresqlTestSuite) TestLogFiles() { var err error // From step LogFiles_ListByServer fmt.Println("Call operation: LogFiles_ListByServer") @@ -413,49 +451,8 @@ func (testsuite *PostgresqlTestSuite) TestLogfiles() { } } -// Microsoft.DBforPostgreSQL/servers/administrators/activeDirectory -func (testsuite *PostgresqlTestSuite) TestServeradministrators() { - var err error - // From step ServerAdministrators_CreateOrUpdate - fmt.Println("Call operation: ServerAdministrators_CreateOrUpdate") - serverAdministratorsClient, err := armpostgresql.NewServerAdministratorsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - serverAdministratorsClientCreateOrUpdateResponsePoller, err := serverAdministratorsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, armpostgresql.ServerAdministratorResource{ - Properties: &armpostgresql.ServerAdministratorProperties{ - AdministratorType: to.Ptr("ActiveDirectory"), - Login: to.Ptr("bob@contoso.com"), - Sid: to.Ptr("c6b82b90-a647-49cb-8a62-0d2d3cb7ac7c"), - TenantID: to.Ptr("c6b82b90-a647-49cb-8a62-0d2d3cb7ac7c"), - }, - }, nil) - testsuite.Require().NoError(err) - _, err = testutil.PollForTest(testsuite.ctx, serverAdministratorsClientCreateOrUpdateResponsePoller) - testsuite.Require().NoError(err) - - // From step ServerAdministrators_List - fmt.Println("Call operation: 
ServerAdministrators_List") - serverAdministratorsClientNewListPager := serverAdministratorsClient.NewListPager(testsuite.resourceGroupName, testsuite.serverName, nil) - for serverAdministratorsClientNewListPager.More() { - _, err := serverAdministratorsClientNewListPager.NextPage(testsuite.ctx) - testsuite.Require().NoError(err) - break - } - - // From step ServerAdministrators_Get - fmt.Println("Call operation: ServerAdministrators_Get") - _, err = serverAdministratorsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, nil) - testsuite.Require().NoError(err) - - // From step ServerAdministrators_Delete - fmt.Println("Call operation: ServerAdministrators_Delete") - serverAdministratorsClientDeleteResponsePoller, err := serverAdministratorsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, nil) - testsuite.Require().NoError(err) - _, err = testutil.PollForTest(testsuite.ctx, serverAdministratorsClientDeleteResponsePoller) - testsuite.Require().NoError(err) -} - // Microsoft.DBforPostgreSQL/servers/recoverableServers -func (testsuite *PostgresqlTestSuite) TestRecoverableservers() { +func (testsuite *PostgresqlTestSuite) TestRecoverableServers() { var err error // From step RecoverableServers_Get fmt.Println("Call operation: RecoverableServers_Get") @@ -466,7 +463,7 @@ func (testsuite *PostgresqlTestSuite) TestRecoverableservers() { } // Microsoft.DBforPostgreSQL/servers/performanceTiers -func (testsuite *PostgresqlTestSuite) TestServerbasedperformancetier() { +func (testsuite *PostgresqlTestSuite) TestServerBasedPerformanceTier() { var err error // From step ServerBasedPerformanceTier_List fmt.Println("Call operation: ServerBasedPerformanceTier_List") @@ -481,7 +478,7 @@ func (testsuite *PostgresqlTestSuite) TestServerbasedperformancetier() { } // Microsoft.DBforPostgreSQL/locations/performanceTiers -func (testsuite *PostgresqlTestSuite) TestLocationbasedperformancetier() { +func (testsuite 
*PostgresqlTestSuite) TestLocationBasedPerformanceTier() { var err error // From step LocationBasedPerformanceTier_List fmt.Println("Call operation: LocationBasedPerformanceTier_List") @@ -496,7 +493,7 @@ func (testsuite *PostgresqlTestSuite) TestLocationbasedperformancetier() { } // Microsoft.DBforPostgreSQL/checkNameAvailability -func (testsuite *PostgresqlTestSuite) TestChecknameavailability() { +func (testsuite *PostgresqlTestSuite) TestCheckNameAvailability() { var err error // From step CheckNameAvailability_Execute fmt.Println("Call operation: CheckNameAvailability_Execute") diff --git a/sdk/resourcemanager/postgresql/armpostgresql/privateendpointconnections_live_test.go b/sdk/resourcemanager/postgresql/armpostgresql/privateendpointconnections_live_test.go new file mode 100644 index 000000000000..ba1c718d40df --- /dev/null +++ b/sdk/resourcemanager/postgresql/armpostgresql/privateendpointconnections_live_test.go @@ -0,0 +1,271 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package armpostgresql_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresql" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/stretchr/testify/suite" +) + +type PrivateEndpointConnectionsTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + postgresqlserverId string + serverName string + adminPassword string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *PrivateEndpointConnectionsTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/postgresql/armpostgresql/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.serverName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "serverna", 14, true) + testsuite.adminPassword = testutil.GetEnv("ADMIN_PASSWORD", "") + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *PrivateEndpointConnectionsTestSuite) TearDownSuite() { + 
_, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestPrivateEndpointConnectionsTestSuite(t *testing.T) { + suite.Run(t, new(PrivateEndpointConnectionsTestSuite)) +} + +func (testsuite *PrivateEndpointConnectionsTestSuite) Prepare() { + var err error + // From step Servers_Create + fmt.Println("Call operation: Servers_Create") + serversClient, err := armpostgresql.NewServersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serversClientCreateResponsePoller, err := serversClient.BeginCreate(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, armpostgresql.ServerForCreate{ + Location: to.Ptr(testsuite.location), + Properties: &armpostgresql.ServerPropertiesForDefaultCreate{ + CreateMode: to.Ptr(armpostgresql.CreateModeDefault), + MinimalTLSVersion: to.Ptr(armpostgresql.MinimalTLSVersionEnumTLS12), + SSLEnforcement: to.Ptr(armpostgresql.SSLEnforcementEnumEnabled), + StorageProfile: &armpostgresql.StorageProfile{ + BackupRetentionDays: to.Ptr[int32](7), + GeoRedundantBackup: to.Ptr(armpostgresql.GeoRedundantBackupDisabled), + StorageMB: to.Ptr[int32](128000), + }, + AdministratorLogin: to.Ptr("cloudsa"), + AdministratorLoginPassword: to.Ptr(testsuite.adminPassword), + }, + SKU: &armpostgresql.SKU{ + Name: to.Ptr("GP_Gen5_8"), + Family: to.Ptr("Gen5"), + Tier: to.Ptr(armpostgresql.SKUTierGeneralPurpose), + }, + Tags: map[string]*string{ + "ElasticServer": to.Ptr("1"), + }, + }, nil) + testsuite.Require().NoError(err) + var serversClientCreateResponse *armpostgresql.ServersClientCreateResponse + serversClientCreateResponse, err = testutil.PollForTest(testsuite.ctx, serversClientCreateResponsePoller) + testsuite.Require().NoError(err) + testsuite.postgresqlserverId = *serversClientCreateResponse.ID +} + +// 
Microsoft.DBforPostgreSQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName} +func (testsuite *PrivateEndpointConnectionsTestSuite) TestPrivateEndpointConnections() { + var err error + // From step Create_PrivateEndpoint + template := map[string]any{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": map[string]any{ + "location": map[string]any{ + "type": "string", + "defaultValue": testsuite.location, + }, + "networkInterfaceName": map[string]any{ + "type": "string", + "defaultValue": "pepostgresql-nic", + }, + "postgresqlserverId": map[string]any{ + "type": "string", + "defaultValue": testsuite.postgresqlserverId, + }, + "privateEndpointName": map[string]any{ + "type": "string", + "defaultValue": "pepostgresql", + }, + "virtualNetworksName": map[string]any{ + "type": "string", + "defaultValue": "pepostgresqlvnet", + }, + }, + "resources": []any{ + map[string]any{ + "name": "[parameters('virtualNetworksName')]", + "type": "Microsoft.Network/virtualNetworks", + "apiVersion": "2020-11-01", + "location": "[parameters('location')]", + "properties": map[string]any{ + "addressSpace": map[string]any{ + "addressPrefixes": []any{ + "10.0.0.0/16", + }, + }, + "enableDdosProtection": false, + "subnets": []any{ + map[string]any{ + "name": "default", + "properties": map[string]any{ + "addressPrefix": "10.0.0.0/24", + "delegations": []any{}, + "privateEndpointNetworkPolicies": "Disabled", + "privateLinkServiceNetworkPolicies": "Enabled", + }, + }, + }, + "virtualNetworkPeerings": []any{}, + }, + }, + map[string]any{ + "name": "[parameters('networkInterfaceName')]", + "type": "Microsoft.Network/networkInterfaces", + "apiVersion": "2020-11-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks', parameters('virtualNetworksName'))]", + }, + "location": "[parameters('location')]", + "properties": map[string]any{ + "dnsSettings": 
map[string]any{ + "dnsServers": []any{}, + }, + "enableIPForwarding": false, + "ipConfigurations": []any{ + map[string]any{ + "name": "privateEndpointIpConfig", + "properties": map[string]any{ + "primary": true, + "privateIPAddress": "10.0.0.4", + "privateIPAddressVersion": "IPv4", + "privateIPAllocationMethod": "Dynamic", + "subnet": map[string]any{ + "id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + }, + }, + }, + }, + }, + map[string]any{ + "name": "[parameters('privateEndpointName')]", + "type": "Microsoft.Network/privateEndpoints", + "apiVersion": "2020-11-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks', parameters('virtualNetworksName'))]", + }, + "location": "[parameters('location')]", + "properties": map[string]any{ + "customDnsConfigs": []any{}, + "manualPrivateLinkServiceConnections": []any{}, + "privateLinkServiceConnections": []any{ + map[string]any{ + "name": "[parameters('privateEndpointName')]", + "properties": map[string]any{ + "groupIds": []any{ + "postgresqlServer", + }, + "privateLinkServiceConnectionState": map[string]any{ + "description": "Auto-Approved", + "actionsRequired": "None", + "status": "Approved", + }, + "privateLinkServiceId": "[parameters('postgresqlserverId')]", + }, + }, + }, + "subnet": map[string]any{ + "id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + }, + }, + }, + "variables": map[string]any{}, + } + deployment := armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: template, + Mode: to.Ptr(armresources.DeploymentModeIncremental), + }, + } + _, err = testutil.CreateDeployment(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName, "Create_PrivateEndpoint", &deployment) + testsuite.Require().NoError(err) + + var privateEndpointConnectionName string + // From step 
PrivateEndpointConnections_ListByServer + fmt.Println("Call operation: PrivateEndpointConnections_ListByServer") + privateEndpointConnectionsClient, err := armpostgresql.NewPrivateEndpointConnectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + privateEndpointConnectionsClientNewListByServerPager := privateEndpointConnectionsClient.NewListByServerPager(testsuite.resourceGroupName, testsuite.serverName, nil) + for privateEndpointConnectionsClientNewListByServerPager.More() { + result, err := privateEndpointConnectionsClientNewListByServerPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + privateEndpointConnectionName = *result.Value[0].Name + break + } + + // From step PrivateEndpointConnections_Get + fmt.Println("Call operation: PrivateEndpointConnections_Get") + _, err = privateEndpointConnectionsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, privateEndpointConnectionName, nil) + testsuite.Require().NoError(err) + + var privateLinkResourceName string + // from step PrivateLinkResources_ListByServer + fmt.Println("Call operation: PrivateLinkResources_ListByServer") + privateLinkResourcesClient, err := armpostgresql.NewPrivateLinkResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + privateLinkResourcesClientNewListByServerPager := privateLinkResourcesClient.NewListByServerPager(testsuite.resourceGroupName, testsuite.serverName, nil) + for privateLinkResourcesClientNewListByServerPager.More() { + result, err := privateLinkResourcesClientNewListByServerPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + privateLinkResourceName = *result.Value[0].Name + break + } + + // From step PrivateLinkResources_Get + fmt.Println("Call operation: PrivateLinkResources_Get") + _, err = privateLinkResourcesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, privateLinkResourceName, 
nil) + testsuite.Require().NoError(err) + + // From step PrivateEndpointConnections_Delete + fmt.Println("Call operation: PrivateEndpointConnections_Delete") + privateEndpointConnectionsClientDeleteResponsePoller, err := privateEndpointConnectionsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, privateEndpointConnectionName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, privateEndpointConnectionsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/postgresql/armpostgresql/serversecurityalertpolicies_live_test.go b/sdk/resourcemanager/postgresql/armpostgresql/serversecurityalertpolicies_live_test.go new file mode 100644 index 000000000000..f8cecb98ac1c --- /dev/null +++ b/sdk/resourcemanager/postgresql/armpostgresql/serversecurityalertpolicies_live_test.go @@ -0,0 +1,126 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package armpostgresql_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/postgresql/armpostgresql" + "github.com/stretchr/testify/suite" +) + +type ServerSecurityAlertPoliciesTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + serverName string + adminPassword string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ServerSecurityAlertPoliciesTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/postgresql/armpostgresql/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.serverName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "serverna", 14, true) + testsuite.adminPassword = testutil.GetEnv("ADMIN_PASSWORD", "") + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *ServerSecurityAlertPoliciesTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, 
testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestServerSecurityAlertPoliciesTestSuite(t *testing.T) { + suite.Run(t, new(ServerSecurityAlertPoliciesTestSuite)) +} + +func (testsuite *ServerSecurityAlertPoliciesTestSuite) Prepare() { + var err error + // From step Servers_Create + fmt.Println("Call operation: Servers_Create") + serversClient, err := armpostgresql.NewServersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serversClientCreateResponsePoller, err := serversClient.BeginCreate(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, armpostgresql.ServerForCreate{ + Location: to.Ptr(testsuite.location), + Properties: &armpostgresql.ServerPropertiesForDefaultCreate{ + CreateMode: to.Ptr(armpostgresql.CreateModeDefault), + MinimalTLSVersion: to.Ptr(armpostgresql.MinimalTLSVersionEnumTLS12), + SSLEnforcement: to.Ptr(armpostgresql.SSLEnforcementEnumEnabled), + StorageProfile: &armpostgresql.StorageProfile{ + BackupRetentionDays: to.Ptr[int32](7), + GeoRedundantBackup: to.Ptr(armpostgresql.GeoRedundantBackupDisabled), + StorageMB: to.Ptr[int32](128000), + }, + AdministratorLogin: to.Ptr("cloudsa"), + AdministratorLoginPassword: to.Ptr(testsuite.adminPassword), + }, + SKU: &armpostgresql.SKU{ + Name: to.Ptr("GP_Gen5_8"), + Family: to.Ptr("Gen5"), + Tier: to.Ptr(armpostgresql.SKUTierGeneralPurpose), + }, + Tags: map[string]*string{ + "ElasticServer": to.Ptr("1"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serversClientCreateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DBforPostgreSQL/servers/{serverName}/securityAlertPolicies/{securityAlertPolicyName} +func (testsuite *ServerSecurityAlertPoliciesTestSuite) TestServerSecurityAlertPolicies() { + var err error + // From step ServerSecurityAlertPolicies_CreateOrUpdate + fmt.Println("Call operation: 
ServerSecurityAlertPolicies_CreateOrUpdate") + serverSecurityAlertPoliciesClient, err := armpostgresql.NewServerSecurityAlertPoliciesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serverSecurityAlertPoliciesClientCreateOrUpdateResponsePoller, err := serverSecurityAlertPoliciesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, armpostgresql.SecurityAlertPolicyNameDefault, armpostgresql.ServerSecurityAlertPolicy{ + Properties: &armpostgresql.SecurityAlertPolicyProperties{ + EmailAccountAdmins: to.Ptr(true), + State: to.Ptr(armpostgresql.ServerSecurityAlertPolicyStateDisabled), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serverSecurityAlertPoliciesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step ServerSecurityAlertPolicies_ListByServer + fmt.Println("Call operation: ServerSecurityAlertPolicies_ListByServer") + serverSecurityAlertPoliciesClientNewListByServerPager := serverSecurityAlertPoliciesClient.NewListByServerPager(testsuite.resourceGroupName, testsuite.serverName, nil) + for serverSecurityAlertPoliciesClientNewListByServerPager.More() { + _, err := serverSecurityAlertPoliciesClientNewListByServerPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ServerSecurityAlertPolicies_Get + fmt.Println("Call operation: ServerSecurityAlertPolicies_Get") + _, err = serverSecurityAlertPoliciesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.serverName, armpostgresql.SecurityAlertPolicyNameDefault, nil) + testsuite.Require().NoError(err) +} From 0d22aeddaaa48d5ac34320978393bc026dba4ccb Mon Sep 17 00:00:00 2001 From: Peng Jiahui <46921893+Alancere@users.noreply.github.com> Date: Thu, 4 May 2023 14:55:00 +0800 Subject: [PATCH 15/50] add sdk/resourcemanager/eventhub/armeventhub live test (#20686) * add 
sdk/resourcemanager/eventhub/armeventhub live test * update assets --- .../eventhub/armeventhub/assets.json | 2 +- .../disasterrecoveryconfigs_live_test.go | 9 +- .../armeventhub/eventhub_live_test.go | 13 +- .../privateendpointconnections_live_test.go | 245 ++++++++++++++++++ 4 files changed, 258 insertions(+), 11 deletions(-) create mode 100644 sdk/resourcemanager/eventhub/armeventhub/privateendpointconnections_live_test.go diff --git a/sdk/resourcemanager/eventhub/armeventhub/assets.json b/sdk/resourcemanager/eventhub/armeventhub/assets.json index 7424094e97a0..0368812f7eee 100644 --- a/sdk/resourcemanager/eventhub/armeventhub/assets.json +++ b/sdk/resourcemanager/eventhub/armeventhub/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/eventhub/armeventhub", - "Tag": "go/resourcemanager/eventhub/armeventhub_05a4cd540a" + "Tag": "go/resourcemanager/eventhub/armeventhub_98cad0dc15" } diff --git a/sdk/resourcemanager/eventhub/armeventhub/disasterrecoveryconfigs_live_test.go b/sdk/resourcemanager/eventhub/armeventhub/disasterrecoveryconfigs_live_test.go index 54bab7d31945..eb698fc09059 100644 --- a/sdk/resourcemanager/eventhub/armeventhub/disasterrecoveryconfigs_live_test.go +++ b/sdk/resourcemanager/eventhub/armeventhub/disasterrecoveryconfigs_live_test.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" "github.com/stretchr/testify/suite" @@ -39,10 +40,10 @@ func (testsuite *DisasterrecoveryconfigsTestSuite) SetupSuite() { testutil.StartRecording(testsuite.T(), 
"sdk/resourcemanager/eventhub/armeventhub/testdata") testsuite.ctx = context.Background() testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) - testsuite.alias = testutil.GenerateAlphaNumericID(testsuite.T(), "alias2", 6) - testsuite.authorizationRuleName = testutil.GenerateAlphaNumericID(testsuite.T(), "authorizat2", 6) - testsuite.namespaceName = testutil.GenerateAlphaNumericID(testsuite.T(), "namespacen2", 6) - testsuite.namespaceNameSecond = testutil.GenerateAlphaNumericID(testsuite.T(), "namespacensecond", 6) + testsuite.alias, _ = recording.GenerateAlphaNumericID(testsuite.T(), "alias", 11, false) + testsuite.authorizationRuleName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "authorizat", 16, false) + testsuite.namespaceName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "namespacen", 16, false) + testsuite.namespaceNameSecond, _ = recording.GenerateAlphaNumericID(testsuite.T(), "namespacensecond", 22, false) testsuite.location = testutil.GetEnv("LOCATION", "westus") testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") diff --git a/sdk/resourcemanager/eventhub/armeventhub/eventhub_live_test.go b/sdk/resourcemanager/eventhub/armeventhub/eventhub_live_test.go index c69fa9f2207c..dfac34291280 100644 --- a/sdk/resourcemanager/eventhub/armeventhub/eventhub_live_test.go +++ b/sdk/resourcemanager/eventhub/armeventhub/eventhub_live_test.go @@ -14,6 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" 
"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" @@ -43,12 +44,12 @@ func (testsuite *EventhubTestSuite) SetupSuite() { testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/eventhub/armeventhub/testdata") testsuite.ctx = context.Background() testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) - testsuite.applicationGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "applicatio", 6) - testsuite.authorizationRuleName = testutil.GenerateAlphaNumericID(testsuite.T(), "authorizat", 6) - testsuite.consumerGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "consumergr", 6) - testsuite.eventHubName = testutil.GenerateAlphaNumericID(testsuite.T(), "eventhubna", 6) - testsuite.namespaceName = testutil.GenerateAlphaNumericID(testsuite.T(), "namespacen", 6) - testsuite.schemaGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "schemagrou", 6) + testsuite.applicationGroupName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "applicatio", 16, false) + testsuite.authorizationRuleName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "authorizat", 16, false) + testsuite.consumerGroupName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "consumergr", 16, false) + testsuite.eventHubName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "eventhubna", 16, false) + testsuite.namespaceName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "namespacen", 16, false) + testsuite.schemaGroupName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "schemagrou", 16, false) testsuite.storageAccountName = "storageeventhub2" testsuite.location = testutil.GetEnv("LOCATION", "westus") testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") diff --git a/sdk/resourcemanager/eventhub/armeventhub/privateendpointconnections_live_test.go b/sdk/resourcemanager/eventhub/armeventhub/privateendpointconnections_live_test.go new file mode 100644 index 
000000000000..0d88b84d568f --- /dev/null +++ b/sdk/resourcemanager/eventhub/armeventhub/privateendpointconnections_live_test.go @@ -0,0 +1,245 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armeventhub_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/stretchr/testify/suite" +) + +type PrivateEndpointConnectionsTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + namespaceId string + namespaceName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *PrivateEndpointConnectionsTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/eventhub/armeventhub/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.namespaceName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "namespac", 14, false) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = 
testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *PrivateEndpointConnectionsTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestPrivateEndpointConnectionsTestSuite(t *testing.T) { + suite.Run(t, new(PrivateEndpointConnectionsTestSuite)) +} + +func (testsuite *PrivateEndpointConnectionsTestSuite) Prepare() { + var err error + // From step Namespaces_CreateOrUpdate + fmt.Println("Call operation: Namespaces_CreateOrUpdate") + namespacesClient, err := armeventhub.NewNamespacesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + namespacesClientCreateOrUpdateResponsePoller, err := namespacesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.namespaceName, armeventhub.EHNamespace{ + Location: to.Ptr(testsuite.location), + SKU: &armeventhub.SKU{ + Name: to.Ptr(armeventhub.SKUNamePremium), + Tier: to.Ptr(armeventhub.SKUTierPremium), + }, + }, nil) + testsuite.Require().NoError(err) + var namespacesClientCreateOrUpdateResponse *armeventhub.NamespacesClientCreateOrUpdateResponse + namespacesClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, namespacesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.namespaceId = *namespacesClientCreateOrUpdateResponse.ID + + // From step Create_PrivateEndpoint + template := map[string]any{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + 
"contentVersion": "1.0.0.0", + "parameters": map[string]any{ + "location": map[string]any{ + "type": "string", + "defaultValue": testsuite.location, + }, + "namespaceId": map[string]any{ + "type": "string", + "defaultValue": testsuite.namespaceId, + }, + "networkInterfaceName": map[string]any{ + "type": "string", + "defaultValue": "peeventhub-nic", + }, + "privateEndpointName": map[string]any{ + "type": "string", + "defaultValue": "peeventhub", + }, + "virtualNetworksName": map[string]any{ + "type": "string", + "defaultValue": "peeventhubvnet", + }, + }, + "resources": []any{ + map[string]any{ + "name": "[parameters('virtualNetworksName')]", + "type": "Microsoft.Network/virtualNetworks", + "apiVersion": "2020-11-01", + "location": "[parameters('location')]", + "properties": map[string]any{ + "addressSpace": map[string]any{ + "addressPrefixes": []any{ + "10.0.0.0/16", + }, + }, + "enableDdosProtection": false, + "subnets": []any{ + map[string]any{ + "name": "default", + "properties": map[string]any{ + "addressPrefix": "10.0.0.0/24", + "delegations": []any{}, + "privateEndpointNetworkPolicies": "Enabled", + "privateLinkServiceNetworkPolicies": "Enabled", + }, + }, + }, + "virtualNetworkPeerings": []any{}, + }, + }, + map[string]any{ + "name": "[parameters('networkInterfaceName')]", + "type": "Microsoft.Network/networkInterfaces", + "apiVersion": "2020-11-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks', parameters('virtualNetworksName'))]", + }, + "location": "[parameters('location')]", + "properties": map[string]any{ + "dnsSettings": map[string]any{ + "dnsServers": []any{}, + }, + "enableIPForwarding": false, + "ipConfigurations": []any{ + map[string]any{ + "name": "privateEndpointIpConfig", + "properties": map[string]any{ + "primary": true, + "privateIPAddress": "10.0.0.4", + "privateIPAddressVersion": "IPv4", + "privateIPAllocationMethod": "Dynamic", + "subnet": map[string]any{ + "id": 
"[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + }, + }, + }, + }, + }, + map[string]any{ + "name": "[parameters('privateEndpointName')]", + "type": "Microsoft.Network/privateEndpoints", + "apiVersion": "2020-11-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks', parameters('virtualNetworksName'))]", + }, + "location": "[parameters('location')]", + "properties": map[string]any{ + "customDnsConfigs": []any{}, + "manualPrivateLinkServiceConnections": []any{}, + "privateLinkServiceConnections": []any{ + map[string]any{ + "name": "[parameters('privateEndpointName')]", + "properties": map[string]any{ + "groupIds": []any{ + "namespace", + }, + "privateLinkServiceConnectionState": map[string]any{ + "description": "Auto-Approved", + "actionsRequired": "None", + "status": "Approved", + }, + "privateLinkServiceId": "[parameters('namespaceId')]", + }, + }, + }, + "subnet": map[string]any{ + "id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + }, + }, + }, + "variables": map[string]any{}, + } + deployment := armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: template, + Mode: to.Ptr(armresources.DeploymentModeIncremental), + }, + } + _, err = testutil.CreateDeployment(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName, "Create_PrivateEndpoint", &deployment) + testsuite.Require().NoError(err) +} + +// Microsoft.EventHub/namespaces/{namespaceName}/privateEndpointConnections/{privateEndpointConnectionName} +func (testsuite *PrivateEndpointConnectionsTestSuite) TestPrivateEndpointConnections() { + var privateEndpointConnectionName string + var err error + // From step PrivateEndpointConnections_List + fmt.Println("Call operation: PrivateEndpointConnections_List") + privateEndpointConnectionsClient, err := 
armeventhub.NewPrivateEndpointConnectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + privateEndpointConnectionsClientNewListPager := privateEndpointConnectionsClient.NewListPager(testsuite.resourceGroupName, testsuite.namespaceName, nil) + for privateEndpointConnectionsClientNewListPager.More() { + nextResult, err := privateEndpointConnectionsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + + privateEndpointConnectionName = *nextResult.Value[0].Name + break + } + + // From step PrivateEndpointConnections_Get + fmt.Println("Call operation: PrivateEndpointConnections_Get") + _, err = privateEndpointConnectionsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.namespaceName, privateEndpointConnectionName, nil) + testsuite.Require().NoError(err) + + // From step PrivateLinkResources_Get + fmt.Println("Call operation: PrivateLinkResources_Get") + privateLinkResourcesClient, err := armeventhub.NewPrivateLinkResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = privateLinkResourcesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.namespaceName, nil) + testsuite.Require().NoError(err) + + // From step PrivateEndpointConnections_Delete + fmt.Println("Call operation: PrivateEndpointConnections_Delete") + privateEndpointConnectionsClientDeleteResponsePoller, err := privateEndpointConnectionsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.namespaceName, privateEndpointConnectionName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, privateEndpointConnectionsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} From 5fa7df4852a100f4a7502d5064ae54b0aceb8260 Mon Sep 17 00:00:00 2001 From: Peng Jiahui <46921893+Alancere@users.noreply.github.com> Date: Thu, 4 May 2023 15:13:26 +0800 Subject: [PATCH 16/50] add 
sdk/resourcemanager/compute/armcompute live test (#20048) * add sdk/resourcemanager/compute/armcompute live test * skus filter * fix subscriptionId default value * fix * gofmt * update recording --- .../compute/armcompute/assets.json | 2 +- .../armcompute/availabilityset_live_test.go | 117 +++++ .../availabilitysets_client_live_test.go | 120 ----- .../capacityreservation_live_test.go | 122 +++++ .../armcompute/computerpcommon_live_test.go | 100 ++++ .../armcompute/dedicatedhost_live_test.go | 174 ++++++ .../compute/armcompute/disk_live_test.go | 137 +++++ .../armcompute/diskaccess_live_test.go | 129 +++++ .../compute/armcompute/gallery_live_test.go | 265 ++++++++++ sdk/resourcemanager/compute/armcompute/go.mod | 3 +- sdk/resourcemanager/compute/armcompute/go.sum | 7 +- .../proximityplacementgroup_live_test.go | 118 +++++ .../armcompute/restorepoint_live_test.go | 291 ++++++++++ .../armcompute/runcommand_live_test.go | 495 ++++++++++++++++++ .../compute/armcompute/skus_live_test.go | 73 +++ .../compute/armcompute/snapshot_live_test.go | 141 +++++ .../armcompute/sshpublickey_live_test.go | 113 ++++ .../armcompute/virtualmachine_live_test.go | 348 ++++++++++++ .../virtualmachineextensionimage_live_test.go | 82 +++ .../virtualmachineimage_live_test.go | 93 ++++ .../virtualmachines_client_live_test.go | 312 ----------- .../virtualmachinescaleset_live_test.go | 399 ++++++++++++++ ...irtualmachinescalesets_client_live_test.go | 195 ------- 23 files changed, 3202 insertions(+), 634 deletions(-) create mode 100644 sdk/resourcemanager/compute/armcompute/availabilityset_live_test.go delete mode 100644 sdk/resourcemanager/compute/armcompute/availabilitysets_client_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/capacityreservation_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/computerpcommon_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/dedicatedhost_live_test.go create mode 100644 
sdk/resourcemanager/compute/armcompute/disk_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/diskaccess_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/gallery_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/proximityplacementgroup_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/restorepoint_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/runcommand_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/skus_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/snapshot_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/sshpublickey_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/virtualmachine_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/virtualmachineextensionimage_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/virtualmachineimage_live_test.go delete mode 100644 sdk/resourcemanager/compute/armcompute/virtualmachines_client_live_test.go create mode 100644 sdk/resourcemanager/compute/armcompute/virtualmachinescaleset_live_test.go delete mode 100644 sdk/resourcemanager/compute/armcompute/virtualmachinescalesets_client_live_test.go diff --git a/sdk/resourcemanager/compute/armcompute/assets.json b/sdk/resourcemanager/compute/armcompute/assets.json index 740c95e53803..ac6ee8d491e5 100644 --- a/sdk/resourcemanager/compute/armcompute/assets.json +++ b/sdk/resourcemanager/compute/armcompute/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/compute/armcompute", - "Tag": "go/resourcemanager/compute/armcompute_7dca156240" + "Tag": "go/resourcemanager/compute/armcompute_d7f582b36e" } diff --git a/sdk/resourcemanager/compute/armcompute/availabilityset_live_test.go b/sdk/resourcemanager/compute/armcompute/availabilityset_live_test.go new file mode 100644 index 
000000000000..938bb22c9b5a --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/availabilityset_live_test.go @@ -0,0 +1,117 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type AvailabilitySetTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string + availabilitySetName string +} + +func (testsuite *AvailabilitySetTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.availabilitySetName = testutil.GenerateAlphaNumericID(testsuite.T(), "availabili", 6) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + 
testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *AvailabilitySetTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestAvailabilitySetTestSuite(t *testing.T) { + suite.Run(t, new(AvailabilitySetTestSuite)) +} + +// Microsoft.Compute/availabilitySets +func (testsuite *AvailabilitySetTestSuite) TestAvailabilitySets() { + var err error + // From step AvailabilitySets_CreateOrUpdate + fmt.Println("Call operation: AvailabilitySets_CreateOrUpdate") + availabilitySetsClient, err := armcompute.NewAvailabilitySetsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = availabilitySetsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.availabilitySetName, armcompute.AvailabilitySet{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.AvailabilitySetProperties{ + PlatformFaultDomainCount: to.Ptr[int32](2), + PlatformUpdateDomainCount: to.Ptr[int32](20), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step AvailabilitySets_ListBySubscription + fmt.Println("Call operation: AvailabilitySets_ListBySubscription") + availabilitySetsClientNewListBySubscriptionPager := availabilitySetsClient.NewListBySubscriptionPager(&armcompute.AvailabilitySetsClientListBySubscriptionOptions{Expand: nil}) + for availabilitySetsClientNewListBySubscriptionPager.More() { + _, err := availabilitySetsClientNewListBySubscriptionPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step AvailabilitySets_List + fmt.Println("Call operation: AvailabilitySets_List") + availabilitySetsClientNewListPager := availabilitySetsClient.NewListPager(testsuite.resourceGroupName, nil) + for 
availabilitySetsClientNewListPager.More() { + _, err := availabilitySetsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step AvailabilitySets_Get + fmt.Println("Call operation: AvailabilitySets_Get") + _, err = availabilitySetsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.availabilitySetName, nil) + testsuite.Require().NoError(err) + + // From step AvailabilitySets_ListAvailableSizes + fmt.Println("Call operation: AvailabilitySets_ListAvailableSizes") + availabilitySetsClientNewListAvailableSizesPager := availabilitySetsClient.NewListAvailableSizesPager(testsuite.resourceGroupName, testsuite.availabilitySetName, nil) + for availabilitySetsClientNewListAvailableSizesPager.More() { + _, err := availabilitySetsClientNewListAvailableSizesPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step AvailabilitySets_Update + fmt.Println("Call operation: AvailabilitySets_Update") + _, err = availabilitySetsClient.Update(testsuite.ctx, testsuite.resourceGroupName, testsuite.availabilitySetName, armcompute.AvailabilitySetUpdate{}, nil) + testsuite.Require().NoError(err) + + // From step AvailabilitySets_Delete + fmt.Println("Call operation: AvailabilitySets_Delete") + _, err = availabilitySetsClient.Delete(testsuite.ctx, testsuite.resourceGroupName, testsuite.availabilitySetName, nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/availabilitysets_client_live_test.go b/sdk/resourcemanager/compute/armcompute/availabilitysets_client_live_test.go deleted file mode 100644 index 014e6b7e340c..000000000000 --- a/sdk/resourcemanager/compute/armcompute/availabilitysets_client_live_test.go +++ /dev/null @@ -1,120 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package armcompute_test - -import ( - "context" - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" - "github.com/stretchr/testify/suite" -) - -type AvailabilitySetsClientTestSuite struct { - suite.Suite - - ctx context.Context - cred azcore.TokenCredential - options *arm.ClientOptions - location string - resourceGroupName string - subscriptionID string -} - -func (testsuite *AvailabilitySetsClientTestSuite) SetupSuite() { - testsuite.ctx = context.Background() - testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) - testsuite.location = testutil.GetEnv("LOCATION", "eastus") - testsuite.subscriptionID = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") - resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.location) - testsuite.Require().NoError(err) - testsuite.resourceGroupName = *resourceGroup.Name -} - -func (testsuite *AvailabilitySetsClientTestSuite) TearDownSuite() { - _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.resourceGroupName) - testsuite.Require().NoError(err) - testutil.StopRecording(testsuite.T()) -} - -func TestAvailabilitySetsClient(t *testing.T) { - suite.Run(t, new(AvailabilitySetsClientTestSuite)) -} - -func (testsuite *AvailabilitySetsClientTestSuite) TestAvailabilitySetsCRUD() { - // create availability sets - fmt.Println("Call operation: AvailabilitySets_CreateOrUpdate") - client, err := 
armcompute.NewAvailabilitySetsClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - name := "go-test-availability" - resp, err := client.CreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - name, - armcompute.AvailabilitySet{ - Location: to.Ptr("westus"), - SKU: &armcompute.SKU{ - Name: to.Ptr(string(armcompute.AvailabilitySetSKUTypesAligned)), - }, - Properties: &armcompute.AvailabilitySetProperties{ - PlatformFaultDomainCount: to.Ptr[int32](1), - PlatformUpdateDomainCount: to.Ptr[int32](1), - }, - }, - nil, - ) - testsuite.Require().NoError(err) - testsuite.Require().Equal(*resp.Name, name) - - // get - fmt.Println("Call operation: AvailabilitySets_Get") - getResp, err := client.Get(testsuite.ctx, testsuite.resourceGroupName, name, nil) - testsuite.Require().NoError(err) - testsuite.Require().Equal(*getResp.Name, name) - - // list - fmt.Println("Call operation: AvailabilitySets_List") - listPager := client.NewListPager(testsuite.resourceGroupName, nil) - testsuite.Require().True(listPager.More()) - - // list available size - fmt.Println("Call operation: AvailabilitySets_ListAvailableSize") - listResp := client.NewListAvailableSizesPager(testsuite.resourceGroupName, name, nil) - testsuite.Require().True(listResp.More()) - - // list by subscription - fmt.Println("Call operation: AvailabilitySets_ListBySubscription") - listBySubscription := client.NewListBySubscriptionPager(nil) - testsuite.Require().True(listBySubscription.More()) - - // update - fmt.Println("Call operation: AvailabilitySets_Update") - updateResp, err := client.Update( - testsuite.ctx, - testsuite.resourceGroupName, - name, - armcompute.AvailabilitySetUpdate{ - Tags: map[string]*string{ - "tag": to.Ptr("value"), - }, - }, - nil, - ) - testsuite.Require().NoError(err) - testsuite.Require().Equal(name, *updateResp.Name) - - // delete - fmt.Println("Call operation: AvailabilitySets_Delete") - _, err = client.Delete(testsuite.ctx, 
testsuite.resourceGroupName, name, nil) - testsuite.Require().NoError(err) -} diff --git a/sdk/resourcemanager/compute/armcompute/capacityreservation_live_test.go b/sdk/resourcemanager/compute/armcompute/capacityreservation_live_test.go new file mode 100644 index 000000000000..768fca18eae0 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/capacityreservation_live_test.go @@ -0,0 +1,122 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type CapacityReservationTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + capacityReservationGroupName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *CapacityReservationTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.capacityReservationGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "capacityre", 6) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", 
"scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *CapacityReservationTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestCapacityReservationTestSuite(t *testing.T) { + suite.Run(t, new(CapacityReservationTestSuite)) +} + +func (testsuite *CapacityReservationTestSuite) Prepare() { + var err error + // From step CapacityReservationGroups_CreateOrUpdate + fmt.Println("Call operation: CapacityReservationGroups_CreateOrUpdate") + capacityReservationGroupsClient, err := armcompute.NewCapacityReservationGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = capacityReservationGroupsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.capacityReservationGroupName, armcompute.CapacityReservationGroup{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{ + "department": to.Ptr("finance"), + }, + Zones: []*string{ + to.Ptr("1"), + to.Ptr("2")}, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/capacityReservationGroups +func (testsuite *CapacityReservationTestSuite) TestCapacityReservationGroups() { + var err error + // From step CapacityReservationGroups_ListBySubscription + fmt.Println("Call operation: CapacityReservationGroups_ListBySubscription") + capacityReservationGroupsClient, err := 
armcompute.NewCapacityReservationGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + capacityReservationGroupsClientNewListBySubscriptionPager := capacityReservationGroupsClient.NewListBySubscriptionPager(&armcompute.CapacityReservationGroupsClientListBySubscriptionOptions{Expand: to.Ptr(armcompute.ExpandTypesForGetCapacityReservationGroupsVirtualMachinesRef)}) + for capacityReservationGroupsClientNewListBySubscriptionPager.More() { + _, err := capacityReservationGroupsClientNewListBySubscriptionPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step CapacityReservationGroups_ListByResourceGroup + fmt.Println("Call operation: CapacityReservationGroups_ListByResourceGroup") + capacityReservationGroupsClientNewListByResourceGroupPager := capacityReservationGroupsClient.NewListByResourceGroupPager(testsuite.resourceGroupName, &armcompute.CapacityReservationGroupsClientListByResourceGroupOptions{Expand: to.Ptr(armcompute.ExpandTypesForGetCapacityReservationGroupsVirtualMachinesRef)}) + for capacityReservationGroupsClientNewListByResourceGroupPager.More() { + _, err := capacityReservationGroupsClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step CapacityReservationGroups_Get + fmt.Println("Call operation: CapacityReservationGroups_Get") + _, err = capacityReservationGroupsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.capacityReservationGroupName, &armcompute.CapacityReservationGroupsClientGetOptions{Expand: to.Ptr(armcompute.CapacityReservationGroupInstanceViewTypesInstanceView)}) + testsuite.Require().NoError(err) + + // From step CapacityReservationGroups_Update + fmt.Println("Call operation: CapacityReservationGroups_Update") + _, err = capacityReservationGroupsClient.Update(testsuite.ctx, testsuite.resourceGroupName, testsuite.capacityReservationGroupName, 
armcompute.CapacityReservationGroupUpdate{}, nil) + testsuite.Require().NoError(err) +} + +func (testsuite *CapacityReservationTestSuite) Cleanup() { + var err error + // From step CapacityReservationGroups_Delete + fmt.Println("Call operation: CapacityReservationGroups_Delete") + capacityReservationGroupsClient, err := armcompute.NewCapacityReservationGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = capacityReservationGroupsClient.Delete(testsuite.ctx, testsuite.resourceGroupName, testsuite.capacityReservationGroupName, nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/computerpcommon_live_test.go b/sdk/resourcemanager/compute/armcompute/computerpcommon_live_test.go new file mode 100644 index 000000000000..77ebfe856aca --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/computerpcommon_live_test.go @@ -0,0 +1,100 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type ComputeRpCommonTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ComputeRpCommonTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *ComputeRpCommonTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestComputeRpCommonTestSuite(t *testing.T) { + suite.Run(t, new(ComputeRpCommonTestSuite)) +} + +// Microsoft.Compute/operations +func (testsuite *ComputeRpCommonTestSuite) TestOperations() { + var err error + // From step Operations_List + fmt.Println("Call operation: Operations_List") + 
operationsClient, err := armcompute.NewOperationsClient(testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + operationsClientNewListPager := operationsClient.NewListPager(nil) + for operationsClientNewListPager.More() { + _, err := operationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} + +// Microsoft.Compute/locations/{location}/usages +func (testsuite *ComputeRpCommonTestSuite) TestUsage() { + var err error + // From step Usage_List + fmt.Println("Call operation: Usage_List") + usageClient, err := armcompute.NewUsageClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + usageClientNewListPager := usageClient.NewListPager(testsuite.location, nil) + for usageClientNewListPager.More() { + _, err := usageClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} + +// Microsoft.Compute/locations/{location}/vmSizes +func (testsuite *ComputeRpCommonTestSuite) TestVirtualMachineSizes() { + var err error + // From step VirtualMachineSizes_List + fmt.Println("Call operation: VirtualMachineSizes_List") + virtualMachineSizesClient, err := armcompute.NewVirtualMachineSizesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachineSizesClientNewListPager := virtualMachineSizesClient.NewListPager(testsuite.location, nil) + for virtualMachineSizesClientNewListPager.More() { + _, err := virtualMachineSizesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/compute/armcompute/dedicatedhost_live_test.go b/sdk/resourcemanager/compute/armcompute/dedicatedhost_live_test.go new file mode 100644 index 000000000000..8852d1a2e308 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/dedicatedhost_live_test.go @@ -0,0 +1,174 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft 
Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type DedicatedHostTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + hostGroupName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *DedicatedHostTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.hostGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "hostgroupn", 6) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *DedicatedHostTestSuite) TearDownSuite() { + _, err := 
testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestDedicatedHostTestSuite(t *testing.T) { + suite.Run(t, new(DedicatedHostTestSuite)) +} + +func (testsuite *DedicatedHostTestSuite) Prepare() { + var err error + // From step DedicatedHostGroups_CreateOrUpdate + fmt.Println("Call operation: DedicatedHostGroups_CreateOrUpdate") + dedicatedHostGroupsClient, err := armcompute.NewDedicatedHostGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = dedicatedHostGroupsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.hostGroupName, armcompute.DedicatedHostGroup{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{ + "department": to.Ptr("finance"), + }, + Properties: &armcompute.DedicatedHostGroupProperties{ + PlatformFaultDomainCount: to.Ptr[int32](3), + SupportAutomaticPlacement: to.Ptr(true), + }, + Zones: []*string{ + to.Ptr("1")}, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/hostGroups +func (testsuite *DedicatedHostTestSuite) TestDedicatedHostGroups() { + var err error + // From step DedicatedHostGroups_ListBySubscription + fmt.Println("Call operation: DedicatedHostGroups_ListBySubscription") + dedicatedHostGroupsClient, err := armcompute.NewDedicatedHostGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + dedicatedHostGroupsClientNewListBySubscriptionPager := dedicatedHostGroupsClient.NewListBySubscriptionPager(nil) + for dedicatedHostGroupsClientNewListBySubscriptionPager.More() { + _, err := dedicatedHostGroupsClientNewListBySubscriptionPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DedicatedHostGroups_ListByResourceGroup + fmt.Println("Call operation: 
DedicatedHostGroups_ListByResourceGroup") + dedicatedHostGroupsClientNewListByResourceGroupPager := dedicatedHostGroupsClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for dedicatedHostGroupsClientNewListByResourceGroupPager.More() { + _, err := dedicatedHostGroupsClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DedicatedHostGroups_Get + fmt.Println("Call operation: DedicatedHostGroups_Get") + _, err = dedicatedHostGroupsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.hostGroupName, &armcompute.DedicatedHostGroupsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step DedicatedHostGroups_Update + fmt.Println("Call operation: DedicatedHostGroups_Update") + _, err = dedicatedHostGroupsClient.Update(testsuite.ctx, testsuite.resourceGroupName, testsuite.hostGroupName, armcompute.DedicatedHostGroupUpdate{}, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/hostGroups/hosts +func (testsuite *DedicatedHostTestSuite) TestDedicatedHosts() { + hostName := testutil.GenerateAlphaNumericID(testsuite.T(), "hostname", 6) + var err error + // From step DedicatedHosts_CreateOrUpdate + fmt.Println("Call operation: DedicatedHosts_CreateOrUpdate") + dedicatedHostsClient, err := armcompute.NewDedicatedHostsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + dedicatedHostsClientCreateOrUpdateResponsePoller, err := dedicatedHostsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.hostGroupName, hostName, armcompute.DedicatedHost{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{ + "department": to.Ptr("HR"), + }, + Properties: &armcompute.DedicatedHostProperties{ + PlatformFaultDomain: to.Ptr[int32](1), + }, + SKU: &armcompute.SKU{ + Name: to.Ptr("DSv3-Type1"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = 
testutil.PollForTest(testsuite.ctx, dedicatedHostsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step DedicatedHosts_ListByHostGroup + fmt.Println("Call operation: DedicatedHosts_ListByHostGroup") + dedicatedHostsClientNewListByHostGroupPager := dedicatedHostsClient.NewListByHostGroupPager(testsuite.resourceGroupName, testsuite.hostGroupName, nil) + for dedicatedHostsClientNewListByHostGroupPager.More() { + _, err := dedicatedHostsClientNewListByHostGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DedicatedHosts_Get + fmt.Println("Call operation: DedicatedHosts_Get") + _, err = dedicatedHostsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.hostGroupName, hostName, &armcompute.DedicatedHostsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step DedicatedHosts_Update + fmt.Println("Call operation: DedicatedHosts_Update") + dedicatedHostsClientUpdateResponsePoller, err := dedicatedHostsClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.hostGroupName, hostName, armcompute.DedicatedHostUpdate{}, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, dedicatedHostsClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step DedicatedHosts_Restart + fmt.Println("Call operation: DedicatedHosts_Restart") + dedicatedHostsClientRestartResponsePoller, err := dedicatedHostsClient.BeginRestart(testsuite.ctx, testsuite.resourceGroupName, testsuite.hostGroupName, hostName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, dedicatedHostsClientRestartResponsePoller) + testsuite.Require().NoError(err) + + // From step DedicatedHosts_Delete + fmt.Println("Call operation: DedicatedHosts_Delete") + dedicatedHostsClientDeleteResponsePoller, err := dedicatedHostsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.hostGroupName, 
hostName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, dedicatedHostsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/disk_live_test.go b/sdk/resourcemanager/compute/armcompute/disk_live_test.go new file mode 100644 index 000000000000..bedc48da779c --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/disk_live_test.go @@ -0,0 +1,137 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type DiskTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + diskName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *DiskTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.diskName = testutil.GenerateAlphaNumericID(testsuite.T(), "diskname", 6) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", 
"scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *DiskTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestDiskTestSuite(t *testing.T) { + suite.Run(t, new(DiskTestSuite)) +} + +// Microsoft.Compute/disks/{diskName} +func (testsuite *DiskTestSuite) TestDisks() { + var err error + // From step Disks_CreateOrUpdate + fmt.Println("Call operation: Disks_CreateOrUpdate") + disksClient, err := armcompute.NewDisksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + disksClientCreateOrUpdateResponsePoller, err := disksClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskName, armcompute.Disk{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.DiskProperties{ + CreationData: &armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), + }, + DiskSizeGB: to.Ptr[int32](200), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, disksClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step Disks_List + fmt.Println("Call operation: Disks_List") + disksClientNewListPager := disksClient.NewListPager(nil) + for disksClientNewListPager.More() { + _, err := disksClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step Disks_ListByResourceGroup + fmt.Println("Call operation: 
Disks_ListByResourceGroup") + disksClientNewListByResourceGroupPager := disksClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for disksClientNewListByResourceGroupPager.More() { + _, err := disksClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step Disks_Get + fmt.Println("Call operation: Disks_Get") + _, err = disksClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskName, nil) + testsuite.Require().NoError(err) + + // From step Disks_Update + fmt.Println("Call operation: Disks_Update") + disksClientUpdateResponsePoller, err := disksClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskName, armcompute.DiskUpdate{ + Properties: &armcompute.DiskUpdateProperties{ + NetworkAccessPolicy: to.Ptr(armcompute.NetworkAccessPolicyAllowAll), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, disksClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step Disks_GrantAccess + fmt.Println("Call operation: Disks_GrantAccess") + disksClientGrantAccessResponsePoller, err := disksClient.BeginGrantAccess(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskName, armcompute.GrantAccessData{ + Access: to.Ptr(armcompute.AccessLevelRead), + DurationInSeconds: to.Ptr[int32](300), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, disksClientGrantAccessResponsePoller) + testsuite.Require().NoError(err) + + // From step Disks_RevokeAccess + fmt.Println("Call operation: Disks_RevokeAccess") + disksClientRevokeAccessResponsePoller, err := disksClient.BeginRevokeAccess(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, disksClientRevokeAccessResponsePoller) + testsuite.Require().NoError(err) + + // From step Disks_Delete + fmt.Println("Call operation: 
Disks_Delete") + disksClientDeleteResponsePoller, err := disksClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, disksClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/diskaccess_live_test.go b/sdk/resourcemanager/compute/armcompute/diskaccess_live_test.go new file mode 100644 index 000000000000..8819fc58dd5c --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/diskaccess_live_test.go @@ -0,0 +1,129 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type DiskAccessTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + diskAccessName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *DiskAccessTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.diskAccessName = testutil.GenerateAlphaNumericID(testsuite.T(), 
"diskaccess", 6) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *DiskAccessTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestDiskAccessTestSuite(t *testing.T) { + suite.Run(t, new(DiskAccessTestSuite)) +} + +// Microsoft.Compute/diskAccesses/{diskAccessName} +func (testsuite *DiskAccessTestSuite) TestDiskAccesses() { + var err error + // From step DiskAccesses_CreateOrUpdate + fmt.Println("Call operation: DiskAccesses_CreateOrUpdate") + diskAccessesClient, err := armcompute.NewDiskAccessesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + diskAccessesClientCreateOrUpdateResponsePoller, err := diskAccessesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskAccessName, armcompute.DiskAccess{ + Location: to.Ptr(testsuite.location), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, diskAccessesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step DiskAccesses_List + fmt.Println("Call operation: DiskAccesses_List") + diskAccessesClientNewListPager := diskAccessesClient.NewListPager(nil) + for diskAccessesClientNewListPager.More() { + _, err := diskAccessesClientNewListPager.NextPage(testsuite.ctx) + 
testsuite.Require().NoError(err) + break + } + + // From step DiskAccesses_ListPrivateEndpointConnections + fmt.Println("Call operation: DiskAccesses_ListPrivateEndpointConnections") + diskAccessesClientNewListPrivateEndpointConnectionsPager := diskAccessesClient.NewListPrivateEndpointConnectionsPager(testsuite.resourceGroupName, testsuite.diskAccessName, nil) + for diskAccessesClientNewListPrivateEndpointConnectionsPager.More() { + _, err := diskAccessesClientNewListPrivateEndpointConnectionsPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DiskAccesses_GetPrivateLinkResources + fmt.Println("Call operation: DiskAccesses_GetPrivateLinkResources") + _, err = diskAccessesClient.GetPrivateLinkResources(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskAccessName, nil) + testsuite.Require().NoError(err) + + // From step DiskAccesses_ListByResourceGroup + fmt.Println("Call operation: DiskAccesses_ListByResourceGroup") + diskAccessesClientNewListByResourceGroupPager := diskAccessesClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for diskAccessesClientNewListByResourceGroupPager.More() { + _, err := diskAccessesClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DiskAccesses_Get + fmt.Println("Call operation: DiskAccesses_Get") + _, err = diskAccessesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskAccessName, nil) + testsuite.Require().NoError(err) + + // From step DiskAccesses_Update + fmt.Println("Call operation: DiskAccesses_Update") + diskAccessesClientUpdateResponsePoller, err := diskAccessesClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskAccessName, armcompute.DiskAccessUpdate{ + Tags: map[string]*string{ + "department": to.Ptr("Development"), + "project": to.Ptr("PrivateEndpoints"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = 
testutil.PollForTest(testsuite.ctx, diskAccessesClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step DiskAccesses_Delete + fmt.Println("Call operation: DiskAccesses_Delete") + diskAccessesClientDeleteResponsePoller, err := diskAccessesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.diskAccessName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, diskAccessesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/gallery_live_test.go b/sdk/resourcemanager/compute/armcompute/gallery_live_test.go new file mode 100644 index 000000000000..a62b519b428b --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/gallery_live_test.go @@ -0,0 +1,265 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type GalleryTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + galleryApplicationName string + galleryImageName string + galleryName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *GalleryTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.galleryApplicationName = testutil.GenerateAlphaNumericID(testsuite.T(), "galleryapp", 6) + testsuite.galleryImageName = testutil.GenerateAlphaNumericID(testsuite.T(), "galleryima", 6) + testsuite.galleryName = testutil.GenerateAlphaNumericID(testsuite.T(), "gallerynam", 6) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *GalleryTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, 
testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestGalleryTestSuite(t *testing.T) { + suite.Run(t, new(GalleryTestSuite)) +} + +func (testsuite *GalleryTestSuite) Prepare() { + var err error + // From step Galleries_CreateOrUpdate + fmt.Println("Call operation: Galleries_CreateOrUpdate") + galleriesClient, err := armcompute.NewGalleriesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + galleriesClientCreateOrUpdateResponsePoller, err := galleriesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, armcompute.Gallery{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.GalleryProperties{ + Description: to.Ptr("This is the gallery description."), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, galleriesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/galleries/{galleryName} +func (testsuite *GalleryTestSuite) TestGalleries() { + var err error + // From step Galleries_List + fmt.Println("Call operation: Galleries_List") + galleriesClient, err := armcompute.NewGalleriesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + galleriesClientNewListPager := galleriesClient.NewListPager(nil) + for galleriesClientNewListPager.More() { + _, err := galleriesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step Galleries_ListByResourceGroup + fmt.Println("Call operation: Galleries_ListByResourceGroup") + galleriesClientNewListByResourceGroupPager := galleriesClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for galleriesClientNewListByResourceGroupPager.More() { + _, err := galleriesClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + 
testsuite.Require().NoError(err) + break + } + + // From step Galleries_Get + fmt.Println("Call operation: Galleries_Get") + _, err = galleriesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, &armcompute.GalleriesClientGetOptions{Select: nil, + Expand: nil, + }) + testsuite.Require().NoError(err) + + // From step Galleries_Update + fmt.Println("Call operation: Galleries_Update") + galleriesClientUpdateResponsePoller, err := galleriesClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, armcompute.GalleryUpdate{ + Properties: &armcompute.GalleryProperties{ + Description: to.Ptr("This is the gallery description."), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, galleriesClientUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName} +func (testsuite *GalleryTestSuite) TestGalleryImages() { + var err error + // From step GalleryImages_CreateOrUpdate + fmt.Println("Call operation: GalleryImages_CreateOrUpdate") + galleryImagesClient, err := armcompute.NewGalleryImagesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + galleryImagesClientCreateOrUpdateResponsePoller, err := galleryImagesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, testsuite.galleryImageName, armcompute.GalleryImage{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.GalleryImageProperties{ + HyperVGeneration: to.Ptr(armcompute.HyperVGenerationV1), + Identifier: &armcompute.GalleryImageIdentifier{ + Offer: to.Ptr("myOfferName"), + Publisher: to.Ptr("myPublisherName"), + SKU: to.Ptr("mySkuName"), + }, + OSState: to.Ptr(armcompute.OperatingSystemStateTypesGeneralized), + OSType: to.Ptr(armcompute.OperatingSystemTypesWindows), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = 
testutil.PollForTest(testsuite.ctx, galleryImagesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step GalleryImages_ListByGallery + fmt.Println("Call operation: GalleryImages_ListByGallery") + galleryImagesClientNewListByGalleryPager := galleryImagesClient.NewListByGalleryPager(testsuite.resourceGroupName, testsuite.galleryName, nil) + for galleryImagesClientNewListByGalleryPager.More() { + _, err := galleryImagesClientNewListByGalleryPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step GalleryImages_Get + fmt.Println("Call operation: GalleryImages_Get") + _, err = galleryImagesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, testsuite.galleryImageName, nil) + testsuite.Require().NoError(err) + + // From step GalleryImages_Update + fmt.Println("Call operation: GalleryImages_Update") + galleryImagesClientUpdateResponsePoller, err := galleryImagesClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, testsuite.galleryImageName, armcompute.GalleryImageUpdate{ + Tags: map[string]*string{ + "0": to.Ptr("[object Object]"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, galleryImagesClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step GalleryImages_Delete + fmt.Println("Call operation: GalleryImages_Delete") + galleryImagesClientDeleteResponsePoller, err := galleryImagesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, testsuite.galleryImageName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, galleryImagesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName} +func (testsuite *GalleryTestSuite) TestGalleryApplications() { + var err error + // From step GalleryApplications_CreateOrUpdate + 
fmt.Println("Call operation: GalleryApplications_CreateOrUpdate") + galleryApplicationsClient, err := armcompute.NewGalleryApplicationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + galleryApplicationsClientCreateOrUpdateResponsePoller, err := galleryApplicationsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, testsuite.galleryApplicationName, armcompute.GalleryApplication{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.GalleryApplicationProperties{ + Description: to.Ptr("This is the gallery application description."), + CustomActions: []*armcompute.GalleryApplicationCustomAction{ + { + Name: to.Ptr("myCustomAction"), + Description: to.Ptr("This is the custom action description."), + Parameters: []*armcompute.GalleryApplicationCustomActionParameter{ + { + Name: to.Ptr("myCustomActionParameter"), + Type: to.Ptr(armcompute.GalleryApplicationCustomActionParameterTypeString), + Description: to.Ptr("This is the description of the parameter"), + DefaultValue: to.Ptr("default value of parameter."), + Required: to.Ptr(false), + }}, + Script: to.Ptr("myCustomActionScript"), + }}, + Eula: to.Ptr("This is the gallery application EULA."), + PrivacyStatementURI: to.Ptr("myPrivacyStatementUri}"), + ReleaseNoteURI: to.Ptr("myReleaseNoteUri"), + SupportedOSType: to.Ptr(armcompute.OperatingSystemTypesWindows), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, galleryApplicationsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step GalleryApplications_ListByGallery + fmt.Println("Call operation: GalleryApplications_ListByGallery") + galleryApplicationsClientNewListByGalleryPager := galleryApplicationsClient.NewListByGalleryPager(testsuite.resourceGroupName, testsuite.galleryName, nil) + for galleryApplicationsClientNewListByGalleryPager.More() { + _, err := 
galleryApplicationsClientNewListByGalleryPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step GalleryApplications_Get + fmt.Println("Call operation: GalleryApplications_Get") + _, err = galleryApplicationsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, testsuite.galleryApplicationName, nil) + testsuite.Require().NoError(err) + + // From step GalleryApplications_Update + fmt.Println("Call operation: GalleryApplications_Update") + galleryApplicationsClientUpdateResponsePoller, err := galleryApplicationsClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, testsuite.galleryApplicationName, armcompute.GalleryApplicationUpdate{ + Properties: &armcompute.GalleryApplicationProperties{ + Description: to.Ptr("This is the gallery application description."), + CustomActions: []*armcompute.GalleryApplicationCustomAction{ + { + Name: to.Ptr("myCustomAction"), + Description: to.Ptr("This is the custom action description."), + Parameters: []*armcompute.GalleryApplicationCustomActionParameter{ + { + Name: to.Ptr("myCustomActionParameter"), + Type: to.Ptr(armcompute.GalleryApplicationCustomActionParameterTypeString), + Description: to.Ptr("This is the description of the parameter"), + DefaultValue: to.Ptr("default value of parameter."), + Required: to.Ptr(false), + }}, + Script: to.Ptr("myCustomActionScript"), + }}, + Eula: to.Ptr("This is the gallery application EULA."), + PrivacyStatementURI: to.Ptr("myPrivacyStatementUri}"), + ReleaseNoteURI: to.Ptr("myReleaseNoteUri"), + SupportedOSType: to.Ptr(armcompute.OperatingSystemTypesWindows), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, galleryApplicationsClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step GalleryApplications_Delete + fmt.Println("Call operation: GalleryApplications_Delete") + galleryApplicationsClientDeleteResponsePoller, err := 
galleryApplicationsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.galleryName, testsuite.galleryApplicationName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, galleryApplicationsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/go.mod b/sdk/resourcemanager/compute/armcompute/go.mod index 911c7d63ec64..3527d6bde3ca 100644 --- a/sdk/resourcemanager/compute/armcompute/go.mod +++ b/sdk/resourcemanager/compute/armcompute/go.mod @@ -6,13 +6,12 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 github.com/stretchr/testify v1.7.0 ) require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dnaeon/go-vcr v1.1.0 // indirect diff --git a/sdk/resourcemanager/compute/armcompute/go.sum b/sdk/resourcemanager/compute/armcompute/go.sum index bf1cdbf4fff0..69e6a668b452 100644 --- a/sdk/resourcemanager/compute/armcompute/go.sum +++ b/sdk/resourcemanager/compute/armcompute/go.sum @@ -6,10 +6,9 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6U github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 
h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0 h1:nBy98uKOIfun5z6wx6jwWLrULcM0+cjBalBFZlEZ7CA= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.0.0/go.mod h1:243D9iHbcQXoFUtgHJwL7gl2zx1aDuDMjvBZVGr2uW0= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM= github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/sdk/resourcemanager/compute/armcompute/proximityplacementgroup_live_test.go b/sdk/resourcemanager/compute/armcompute/proximityplacementgroup_live_test.go new file mode 100644 index 000000000000..47694732ad4b --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/proximityplacementgroup_live_test.go @@ -0,0 +1,118 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type ProximityPlacementGroupTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + proximityPlacementGroupName string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ProximityPlacementGroupTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.proximityPlacementGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "proximityp", 6) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *ProximityPlacementGroupTestSuite) TearDownSuite() { + _, err := 
testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestProximityPlacementGroupTestSuite(t *testing.T) { + suite.Run(t, new(ProximityPlacementGroupTestSuite)) +} + +// Microsoft.Compute/proximityPlacementGroups +func (testsuite *ProximityPlacementGroupTestSuite) TestProximityPlacementGroups() { + var err error + // From step ProximityPlacementGroups_CreateOrUpdate + fmt.Println("Call operation: ProximityPlacementGroups_CreateOrUpdate") + proximityPlacementGroupsClient, err := armcompute.NewProximityPlacementGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = proximityPlacementGroupsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.proximityPlacementGroupName, armcompute.ProximityPlacementGroup{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.ProximityPlacementGroupProperties{ + Intent: &armcompute.ProximityPlacementGroupPropertiesIntent{ + VMSizes: []*string{ + to.Ptr("Basic_A0"), + to.Ptr("Basic_A2")}, + }, + ProximityPlacementGroupType: to.Ptr(armcompute.ProximityPlacementGroupTypeStandard), + }, + Zones: []*string{ + to.Ptr("1")}, + }, nil) + testsuite.Require().NoError(err) + + // From step ProximityPlacementGroups_ListBySubscription + fmt.Println("Call operation: ProximityPlacementGroups_ListBySubscription") + proximityPlacementGroupsClientNewListBySubscriptionPager := proximityPlacementGroupsClient.NewListBySubscriptionPager(nil) + for proximityPlacementGroupsClientNewListBySubscriptionPager.More() { + _, err := proximityPlacementGroupsClientNewListBySubscriptionPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ProximityPlacementGroups_ListByResourceGroup + fmt.Println("Call operation: ProximityPlacementGroups_ListByResourceGroup") + 
proximityPlacementGroupsClientNewListByResourceGroupPager := proximityPlacementGroupsClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for proximityPlacementGroupsClientNewListByResourceGroupPager.More() { + _, err := proximityPlacementGroupsClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ProximityPlacementGroups_Get + fmt.Println("Call operation: ProximityPlacementGroups_Get") + _, err = proximityPlacementGroupsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.proximityPlacementGroupName, &armcompute.ProximityPlacementGroupsClientGetOptions{IncludeColocationStatus: nil}) + testsuite.Require().NoError(err) + + // From step ProximityPlacementGroups_Update + fmt.Println("Call operation: ProximityPlacementGroups_Update") + _, err = proximityPlacementGroupsClient.Update(testsuite.ctx, testsuite.resourceGroupName, testsuite.proximityPlacementGroupName, armcompute.ProximityPlacementGroupUpdate{ + Tags: map[string]*string{ + "additionalProp1": to.Ptr("string"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step ProximityPlacementGroups_Delete + fmt.Println("Call operation: ProximityPlacementGroups_Delete") + _, err = proximityPlacementGroupsClient.Delete(testsuite.ctx, testsuite.resourceGroupName, testsuite.proximityPlacementGroupName, nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/restorepoint_live_test.go b/sdk/resourcemanager/compute/armcompute/restorepoint_live_test.go new file mode 100644 index 000000000000..cf14b432a779 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/restorepoint_live_test.go @@ -0,0 +1,291 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/stretchr/testify/suite" +) + +type RestorePointTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + adminUsername string + networkInterfaceId string + networkInterfaceName string + restorePointCollectionName string + virtaulMachineId string + virtualNetworksName string + vmName string + adminPassword string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *RestorePointTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.adminUsername = testutil.GenerateAlphaNumericID(testsuite.T(), "rp", 6) + testsuite.networkInterfaceName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmnicrp", 6) + testsuite.restorePointCollectionName = testutil.GenerateAlphaNumericID(testsuite.T(), "restorepoi", 6) + testsuite.virtualNetworksName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmvnetrp", 6) + testsuite.vmName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmnamerp", 6) + testsuite.adminPassword = testutil.GetEnv("ADMIN_PASSWORD", "") + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = 
testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *RestorePointTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestRestorePointTestSuite(t *testing.T) { + suite.Run(t, new(RestorePointTestSuite)) +} + +func (testsuite *RestorePointTestSuite) Prepare() { + var err error + // From step Create_NetworkInterface + template := map[string]any{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "outputs": map[string]any{ + "networkInterfaceId": map[string]any{ + "type": "string", + "value": "[resourceId('Microsoft.Network/networkInterfaces', parameters('networkInterfaceName'))]", + }, + }, + "parameters": map[string]any{ + "location": map[string]any{ + "type": "string", + "defaultValue": testsuite.location, + }, + "networkInterfaceName": map[string]any{ + "type": "string", + "defaultValue": testsuite.networkInterfaceName, + }, + "virtualNetworksName": map[string]any{ + "type": "string", + "defaultValue": testsuite.virtualNetworksName, + }, + }, + "resources": []any{ + map[string]any{ + "name": "[parameters('virtualNetworksName')]", + "type": "Microsoft.Network/virtualNetworks", + "apiVersion": "2021-05-01", + "location": "[parameters('location')]", + "properties": map[string]any{ + "addressSpace": map[string]any{ + "addressPrefixes": []any{ + "10.0.0.0/16", + }, + }, 
+ "subnets": []any{ + map[string]any{ + "name": "default", + "properties": map[string]any{ + "addressPrefix": "10.0.0.0/24", + }, + }, + }, + }, + }, + map[string]any{ + "name": "[parameters('networkInterfaceName')]", + "type": "Microsoft.Network/networkInterfaces", + "apiVersion": "2021-08-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks', parameters('virtualNetworksName'))]", + }, + "location": "[parameters('location')]", + "properties": map[string]any{ + "ipConfigurations": []any{ + map[string]any{ + "name": "Ipv4config", + "properties": map[string]any{ + "subnet": map[string]any{ + "id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + }, + }, + }, + }, + }, + }, + } + deployment := armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: template, + Mode: to.Ptr(armresources.DeploymentModeIncremental), + }, + } + deploymentExtend, err := testutil.CreateDeployment(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName, "Create_NetworkInterface", &deployment) + testsuite.Require().NoError(err) + testsuite.networkInterfaceId = deploymentExtend.Properties.Outputs.(map[string]interface{})["networkInterfaceId"].(map[string]interface{})["value"].(string) + + // From step VirtualMachines_CreateOrUpdate + fmt.Println("Call operation: VirtualMachines_CreateOrUpdate") + virtualMachinesClient, err := armcompute.NewVirtualMachinesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachinesClientCreateOrUpdateResponsePoller, err := virtualMachinesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, armcompute.VirtualMachine{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.VirtualMachineProperties{ + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: 
to.Ptr(armcompute.VirtualMachineSizeTypesStandardD1V2), + }, + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ + { + ID: to.Ptr(testsuite.networkInterfaceId), + Properties: &armcompute.NetworkInterfaceReferenceProperties{ + Primary: to.Ptr(true), + }, + }}, + }, + OSProfile: &armcompute.OSProfile{ + AdminPassword: to.Ptr(testsuite.adminPassword), + AdminUsername: to.Ptr(testsuite.adminUsername), + ComputerName: to.Ptr(testsuite.vmName), + }, + StorageProfile: &armcompute.StorageProfile{ + ImageReference: &armcompute.ImageReference{ + Offer: to.Ptr("WindowsServer"), + Publisher: to.Ptr("MicrosoftWindowsServer"), + SKU: to.Ptr("2016-Datacenter"), + Version: to.Ptr("latest"), + }, + OSDisk: &armcompute.OSDisk{ + Name: to.Ptr(testsuite.vmName + "osdisk"), + Caching: to.Ptr(armcompute.CachingTypesReadWrite), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), + ManagedDisk: &armcompute.ManagedDiskParameters{ + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), + }, + }, + }, + }, + }, nil) + testsuite.Require().NoError(err) + var virtualMachinesClientCreateOrUpdateResponse *armcompute.VirtualMachinesClientCreateOrUpdateResponse + virtualMachinesClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.virtaulMachineId = *virtualMachinesClientCreateOrUpdateResponse.ID + + // From step RestorePointCollections_CreateOrUpdate + fmt.Println("Call operation: RestorePointCollections_CreateOrUpdate") + restorePointCollectionsClient, err := armcompute.NewRestorePointCollectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = restorePointCollectionsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.restorePointCollectionName, armcompute.RestorePointCollection{ + Location: 
to.Ptr(testsuite.location), + Tags: map[string]*string{ + "myTag1": to.Ptr("tagValue1"), + }, + Properties: &armcompute.RestorePointCollectionProperties{ + Source: &armcompute.RestorePointCollectionSourceProperties{ + ID: to.Ptr(testsuite.virtaulMachineId), + }, + }, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/restorePointCollections +func (testsuite *RestorePointTestSuite) TestRestorePointCollections() { + var err error + // From step RestorePointCollections_ListAll + fmt.Println("Call operation: RestorePointCollections_ListAll") + restorePointCollectionsClient, err := armcompute.NewRestorePointCollectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + restorePointCollectionsClientNewListAllPager := restorePointCollectionsClient.NewListAllPager(nil) + for restorePointCollectionsClientNewListAllPager.More() { + _, err := restorePointCollectionsClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step RestorePointCollections_List + fmt.Println("Call operation: RestorePointCollections_List") + restorePointCollectionsClientNewListPager := restorePointCollectionsClient.NewListPager(testsuite.resourceGroupName, nil) + for restorePointCollectionsClientNewListPager.More() { + _, err := restorePointCollectionsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step RestorePointCollections_Get + fmt.Println("Call operation: RestorePointCollections_Get") + _, err = restorePointCollectionsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.restorePointCollectionName, &armcompute.RestorePointCollectionsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/restorePointCollections/restorePoints +func (testsuite *RestorePointTestSuite) TestRestorePoints() { + restorePointName := testutil.GenerateAlphaNumericID(testsuite.T(), "restorepoi", 6) + 
var err error + // From step RestorePoints_Create + fmt.Println("Call operation: RestorePoints_Create") + restorePointsClient, err := armcompute.NewRestorePointsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + restorePointsClientCreateResponsePoller, err := restorePointsClient.BeginCreate(testsuite.ctx, testsuite.resourceGroupName, testsuite.restorePointCollectionName, restorePointName, armcompute.RestorePoint{}, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, restorePointsClientCreateResponsePoller) + testsuite.Require().NoError(err) + + // From step RestorePoints_Get + fmt.Println("Call operation: RestorePoints_Get") + _, err = restorePointsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.restorePointCollectionName, restorePointName, &armcompute.RestorePointsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step RestorePoints_Delete + fmt.Println("Call operation: RestorePoints_Delete") + restorePointsClientDeleteResponsePoller, err := restorePointsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.restorePointCollectionName, restorePointName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, restorePointsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *RestorePointTestSuite) Cleanup() { + var err error + // From step RestorePointCollections_Update + restorePointCollectionsClient, err := armcompute.NewRestorePointCollectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + fmt.Println("Call operation: RestorePointCollections_Update") + _, err = restorePointCollectionsClient.Update(testsuite.ctx, testsuite.resourceGroupName, testsuite.restorePointCollectionName, armcompute.RestorePointCollectionUpdate{}, nil) + testsuite.Require().NoError(err) + // From step 
RestorePointCollections_Delete + fmt.Println("Call operation: RestorePointCollections_Delete") + restorePointCollectionsClientDeleteResponsePoller, err := restorePointCollectionsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.restorePointCollectionName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, restorePointCollectionsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/runcommand_live_test.go b/sdk/resourcemanager/compute/armcompute/runcommand_live_test.go new file mode 100644 index 000000000000..6f7874afa6d4 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/runcommand_live_test.go @@ -0,0 +1,495 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/stretchr/testify/suite" +) + +type RunCommandTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + adminUsername string + networkInterfaceId string + networkInterfaceName string + subnetId string + virtualNetworkSubnetName string + virtualNetworksName string + vmName string + vmScaleSetName string + vmRunCommandName string + vmssRunCommandName string + adminPassword string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *RunCommandTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.adminUsername = testutil.GenerateAlphaNumericID(testsuite.T(), "rc", 6) + testsuite.networkInterfaceName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmnicrc", 6) + testsuite.virtualNetworkSubnetName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmssvnetnarc", 6) + testsuite.virtualNetworksName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmvnetrc", 6) + testsuite.vmName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmcommand", 6) + testsuite.vmScaleSetName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmscalesetcommand", 6) + testsuite.vmRunCommandName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmruncommand", 6) + 
testsuite.vmssRunCommandName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmssruncommand", 6) + testsuite.adminPassword = testutil.GetEnv("ADMIN_PASSWORD", "") + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *RunCommandTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestRunCommandTestSuite(t *testing.T) { + suite.Run(t, new(RunCommandTestSuite)) +} + +func (testsuite *RunCommandTestSuite) Prepare() { + var err error + // From step Create_NetworkInterface + template := map[string]any{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "outputs": map[string]any{ + "networkInterfaceId": map[string]any{ + "type": "string", + "value": "[resourceId('Microsoft.Network/networkInterfaces', parameters('networkInterfaceName'))]", + }, + }, + "parameters": map[string]any{ + "location": map[string]any{ + "type": "string", + "defaultValue": testsuite.location, + }, + "networkInterfaceName": map[string]any{ + "type": "string", + "defaultValue": testsuite.networkInterfaceName, + }, + "virtualNetworksName": map[string]any{ + "type": "string", + "defaultValue": testsuite.virtualNetworksName, + }, + }, + "resources": []any{ + map[string]any{ + "name": "[parameters('virtualNetworksName')]", + "type": 
"Microsoft.Network/virtualNetworks", + "apiVersion": "2021-05-01", + "location": "[parameters('location')]", + "properties": map[string]any{ + "addressSpace": map[string]any{ + "addressPrefixes": []any{ + "10.0.0.0/16", + }, + }, + "subnets": []any{ + map[string]any{ + "name": "default", + "properties": map[string]any{ + "addressPrefix": "10.0.0.0/24", + }, + }, + }, + }, + }, + map[string]any{ + "name": "[parameters('networkInterfaceName')]", + "type": "Microsoft.Network/networkInterfaces", + "apiVersion": "2021-08-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks', parameters('virtualNetworksName'))]", + }, + "location": "[parameters('location')]", + "properties": map[string]any{ + "ipConfigurations": []any{ + map[string]any{ + "name": "Ipv4config", + "properties": map[string]any{ + "subnet": map[string]any{ + "id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + }, + }, + }, + }, + }, + }, + } + deployment := armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: template, + Mode: to.Ptr(armresources.DeploymentModeIncremental), + }, + } + deploymentExtend, err := testutil.CreateDeployment(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName, "Create_NetworkInterface", &deployment) + testsuite.Require().NoError(err) + testsuite.networkInterfaceId = deploymentExtend.Properties.Outputs.(map[string]interface{})["networkInterfaceId"].(map[string]interface{})["value"].(string) + + // From step VirtualMachines_CreateOrUpdate + fmt.Println("Call operation: VirtualMachines_CreateOrUpdate") + virtualMachinesClient, err := armcompute.NewVirtualMachinesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachinesClientCreateOrUpdateResponsePoller, err := virtualMachinesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, 
testsuite.vmName, armcompute.VirtualMachine{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.VirtualMachineProperties{ + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: to.Ptr(armcompute.VirtualMachineSizeTypesStandardD1V2), + }, + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ + { + ID: to.Ptr(testsuite.networkInterfaceId), + Properties: &armcompute.NetworkInterfaceReferenceProperties{ + Primary: to.Ptr(true), + }, + }}, + }, + OSProfile: &armcompute.OSProfile{ + AdminPassword: to.Ptr(testsuite.adminPassword), + AdminUsername: to.Ptr(testsuite.adminUsername), + ComputerName: to.Ptr(testsuite.vmName), + }, + StorageProfile: &armcompute.StorageProfile{ + ImageReference: &armcompute.ImageReference{ + Offer: to.Ptr("WindowsServer"), + Publisher: to.Ptr("MicrosoftWindowsServer"), + SKU: to.Ptr("2016-Datacenter"), + Version: to.Ptr("latest"), + }, + OSDisk: &armcompute.OSDisk{ + Name: to.Ptr(testsuite.vmName + "osdisk"), + Caching: to.Ptr(armcompute.CachingTypesReadWrite), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), + ManagedDisk: &armcompute.ManagedDiskParameters{ + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), + }, + }, + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step Create_NetworkAndSubnet + template = map[string]any{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "outputs": map[string]any{ + "subnetId": map[string]any{ + "type": "string", + "value": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworkSubnetName'), 'default')]", + }, + }, + "parameters": map[string]any{ + "location": map[string]any{ + "type": "string", + "defaultValue": testsuite.location, + }, + 
"virtualNetworkSubnetName": map[string]any{ + "type": "string", + "defaultValue": testsuite.virtualNetworkSubnetName, + }, + }, + "resources": []any{ + map[string]any{ + "name": "[parameters('virtualNetworkSubnetName')]", + "type": "Microsoft.Network/virtualNetworks", + "apiVersion": "2021-05-01", + "location": "[parameters('location')]", + "properties": map[string]any{ + "addressSpace": map[string]any{ + "addressPrefixes": []any{ + "10.0.0.0/16", + }, + }, + "subnets": []any{ + map[string]any{ + "name": "default", + "properties": map[string]any{ + "addressPrefix": "10.0.0.0/24", + }, + }, + }, + }, + }, + }, + } + deployment = armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: template, + Mode: to.Ptr(armresources.DeploymentModeIncremental), + }, + } + deploymentExtend, err = testutil.CreateDeployment(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName, "Create_NetworkAndSubnet", &deployment) + testsuite.Require().NoError(err) + testsuite.subnetId = deploymentExtend.Properties.Outputs.(map[string]interface{})["subnetId"].(map[string]interface{})["value"].(string) + + // From step VirtualMachineScaleSets_CreateOrUpdate + fmt.Println("Call operation: VirtualMachineScaleSets_CreateOrUpdate") + virtualMachineScaleSetsClient, err := armcompute.NewVirtualMachineScaleSetsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachineScaleSetsClientCreateOrUpdateResponsePoller, err := virtualMachineScaleSetsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, armcompute.VirtualMachineScaleSet{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.VirtualMachineScaleSetProperties{ + Overprovision: to.Ptr(true), + UpgradePolicy: &armcompute.UpgradePolicy{ + Mode: to.Ptr(armcompute.UpgradeModeManual), + }, + VirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{ + 
NetworkProfile: &armcompute.VirtualMachineScaleSetNetworkProfile{ + NetworkInterfaceConfigurations: []*armcompute.VirtualMachineScaleSetNetworkConfiguration{ + { + Name: to.Ptr(testsuite.vmScaleSetName), + Properties: &armcompute.VirtualMachineScaleSetNetworkConfigurationProperties{ + EnableIPForwarding: to.Ptr(true), + IPConfigurations: []*armcompute.VirtualMachineScaleSetIPConfiguration{ + { + Name: to.Ptr(testsuite.vmScaleSetName), + Properties: &armcompute.VirtualMachineScaleSetIPConfigurationProperties{ + Subnet: &armcompute.APIEntityReference{ + ID: to.Ptr(testsuite.subnetId), + }, + }, + }}, + Primary: to.Ptr(true), + }, + }}, + }, + OSProfile: &armcompute.VirtualMachineScaleSetOSProfile{ + AdminPassword: to.Ptr(testsuite.adminPassword), + AdminUsername: to.Ptr(testsuite.adminUsername), + ComputerNamePrefix: to.Ptr("vmss"), + }, + StorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{ + ImageReference: &armcompute.ImageReference{ + Offer: to.Ptr("WindowsServer"), + Publisher: to.Ptr("MicrosoftWindowsServer"), + SKU: to.Ptr("2016-Datacenter"), + Version: to.Ptr("latest"), + }, + OSDisk: &armcompute.VirtualMachineScaleSetOSDisk{ + Caching: to.Ptr(armcompute.CachingTypesReadWrite), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), + ManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{ + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), + }, + }, + }, + }, + }, + SKU: &armcompute.SKU{ + Name: to.Ptr("Standard_D1_v2"), + Capacity: to.Ptr[int64](3), + Tier: to.Ptr("Standard"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/virtualMachines/{vmName}/runCommands/{runCommandName} +func (testsuite *RunCommandTestSuite) TestVirtualMachineRunCommands() { + var err error + // From step VirtualMachineRunCommands_CreateOrUpdate + 
fmt.Println("Call operation: VirtualMachineRunCommands_CreateOrUpdate") + virtualMachineRunCommandsClient, err := armcompute.NewVirtualMachineRunCommandsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachineRunCommandsClientCreateOrUpdateResponsePoller, err := virtualMachineRunCommandsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, testsuite.vmRunCommandName, armcompute.VirtualMachineRunCommand{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.VirtualMachineRunCommandProperties{ + AsyncExecution: to.Ptr(false), + Parameters: []*armcompute.RunCommandInputParameter{ + { + Name: to.Ptr("param1"), + Value: to.Ptr("value1"), + }, + { + Name: to.Ptr("param2"), + Value: to.Ptr("value2"), + }}, + RunAsPassword: to.Ptr(""), + RunAsUser: to.Ptr("user1"), + Source: &armcompute.VirtualMachineRunCommandScriptSource{ + Script: to.Ptr("Write-Host Hello World!"), + }, + TimeoutInSeconds: to.Ptr[int32](3600), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineRunCommandsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineRunCommands_List + fmt.Println("Call operation: VirtualMachineRunCommands_List") + virtualMachineRunCommandsClientNewListPager := virtualMachineRunCommandsClient.NewListPager(testsuite.location, nil) + for virtualMachineRunCommandsClientNewListPager.More() { + _, err := virtualMachineRunCommandsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachineRunCommands_Get + fmt.Println("Call operation: VirtualMachineRunCommands_Get") + _, err = virtualMachineRunCommandsClient.Get(testsuite.ctx, testsuite.location, "RunPowerShellScript", nil) + testsuite.Require().NoError(err) + + // From step VirtualMachineRunCommands_ListByVirtualMachine + fmt.Println("Call operation: 
VirtualMachineRunCommands_ListByVirtualMachine") + virtualMachineRunCommandsClientNewListByVirtualMachinePager := virtualMachineRunCommandsClient.NewListByVirtualMachinePager(testsuite.resourceGroupName, testsuite.vmName, &armcompute.VirtualMachineRunCommandsClientListByVirtualMachineOptions{Expand: nil}) + for virtualMachineRunCommandsClientNewListByVirtualMachinePager.More() { + _, err := virtualMachineRunCommandsClientNewListByVirtualMachinePager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachineRunCommands_GetByVirtualMachine + fmt.Println("Call operation: VirtualMachineRunCommands_GetByVirtualMachine") + _, err = virtualMachineRunCommandsClient.GetByVirtualMachine(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, testsuite.vmRunCommandName, &armcompute.VirtualMachineRunCommandsClientGetByVirtualMachineOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step VirtualMachineRunCommands_Update + fmt.Println("Call operation: VirtualMachineRunCommands_Update") + virtualMachineRunCommandsClientUpdateResponsePoller, err := virtualMachineRunCommandsClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, testsuite.vmRunCommandName, armcompute.VirtualMachineRunCommandUpdate{ + Properties: &armcompute.VirtualMachineRunCommandProperties{ + Source: &armcompute.VirtualMachineRunCommandScriptSource{ + Script: to.Ptr("Write-Host Script Source Updated!"), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineRunCommandsClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_RunCommand + fmt.Println("Call operation: VirtualMachines_RunCommand") + virtualMachinesClient, err := armcompute.NewVirtualMachinesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachinesClientRunCommandResponsePoller, err := 
virtualMachinesClient.BeginRunCommand(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, armcompute.RunCommandInput{ + CommandID: to.Ptr("RunPowerShellScript"), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientRunCommandResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineRunCommands_Delete + fmt.Println("Call operation: VirtualMachineRunCommands_Delete") + virtualMachineRunCommandsClientDeleteResponsePoller, err := virtualMachineRunCommandsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, testsuite.vmRunCommandName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineRunCommandsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId}/runCommands/{runCommandName} +func (testsuite *RunCommandTestSuite) TestVirtualMachineScaleSetVmRunCommands() { + var err error + // From step VirtualMachineScaleSetVMRunCommands_CreateOrUpdate + fmt.Println("Call operation: VirtualMachineScaleSetVMRunCommands_CreateOrUpdate") + virtualMachineScaleSetVMRunCommandsClient, err := armcompute.NewVirtualMachineScaleSetVMRunCommandsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponsePoller, err := virtualMachineScaleSetVMRunCommandsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, "0", testsuite.vmssRunCommandName, armcompute.VirtualMachineRunCommand{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.VirtualMachineRunCommandProperties{ + AsyncExecution: to.Ptr(false), + Parameters: []*armcompute.RunCommandInputParameter{ + { + Name: to.Ptr("param1"), + Value: to.Ptr("value1"), + }, + { + Name: to.Ptr("param2"), + Value: 
to.Ptr("value2"), + }}, + RunAsPassword: to.Ptr(""), + RunAsUser: to.Ptr("user1"), + Source: &armcompute.VirtualMachineRunCommandScriptSource{ + Script: to.Ptr("Write-Host Hello World!"), + }, + TimeoutInSeconds: to.Ptr[int32](3600), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMRunCommandsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMRunCommands_List + fmt.Println("Call operation: VirtualMachineScaleSetVMRunCommands_List") + virtualMachineScaleSetVMRunCommandsClientNewListPager := virtualMachineScaleSetVMRunCommandsClient.NewListPager(testsuite.resourceGroupName, testsuite.vmScaleSetName, "0", &armcompute.VirtualMachineScaleSetVMRunCommandsClientListOptions{Expand: nil}) + for virtualMachineScaleSetVMRunCommandsClientNewListPager.More() { + _, err := virtualMachineScaleSetVMRunCommandsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachineScaleSetVMRunCommands_Get + fmt.Println("Call operation: VirtualMachineScaleSetVMRunCommands_Get") + _, err = virtualMachineScaleSetVMRunCommandsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, "0", testsuite.vmssRunCommandName, &armcompute.VirtualMachineScaleSetVMRunCommandsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMRunCommands_Update + fmt.Println("Call operation: VirtualMachineScaleSetVMRunCommands_Update") + virtualMachineScaleSetVMRunCommandsClientUpdateResponsePoller, err := virtualMachineScaleSetVMRunCommandsClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, "0", testsuite.vmssRunCommandName, armcompute.VirtualMachineRunCommandUpdate{ + Properties: &armcompute.VirtualMachineRunCommandProperties{ + Source: &armcompute.VirtualMachineRunCommandScriptSource{ + Script: to.Ptr("Write-Host 
Script Source Updated!"), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMRunCommandsClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMs_RunCommand + fmt.Println("Call operation: VirtualMachineScaleSetVMs_RunCommand") + virtualMachineScaleSetVMsClient, err := armcompute.NewVirtualMachineScaleSetVMsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachineScaleSetVMsClientRunCommandResponsePoller, err := virtualMachineScaleSetVMsClient.BeginRunCommand(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, "0", armcompute.RunCommandInput{ + CommandID: to.Ptr("RunPowerShellScript"), + Script: []*string{ + to.Ptr("Write-Host Hello World!")}, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMsClientRunCommandResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMRunCommands_Delete + fmt.Println("Call operation: VirtualMachineScaleSetVMRunCommands_Delete") + virtualMachineScaleSetVMRunCommandsClientDeleteResponsePoller, err := virtualMachineScaleSetVMRunCommandsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, "0", testsuite.vmssRunCommandName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMRunCommandsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/skus_live_test.go b/sdk/resourcemanager/compute/armcompute/skus_live_test.go new file mode 100644 index 000000000000..89b12f6ec696 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/skus_live_test.go @@ -0,0 +1,73 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type SkusTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *SkusTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *SkusTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestSkusTestSuite(t 
*testing.T) { + suite.Run(t, new(SkusTestSuite)) +} + +// Microsoft.Compute/skus +func (testsuite *SkusTestSuite) TestResourceSkus() { + var err error + // From step ResourceSkus_List + fmt.Println("Call operation: ResourceSKUs_List") + resourceSKUsClient, err := armcompute.NewResourceSKUsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + resourceSKUsClientNewListPager := resourceSKUsClient.NewListPager(&armcompute.ResourceSKUsClientListOptions{ + Filter: to.Ptr("location eq 'westus2'"), + }) + for resourceSKUsClientNewListPager.More() { + _, err := resourceSKUsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/compute/armcompute/snapshot_live_test.go b/sdk/resourcemanager/compute/armcompute/snapshot_live_test.go new file mode 100644 index 000000000000..e664393005d1 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/snapshot_live_test.go @@ -0,0 +1,141 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type SnapshotTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + snapshotName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *SnapshotTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.snapshotName = testutil.GenerateAlphaNumericID(testsuite.T(), "snapshotna", 6) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *SnapshotTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestSnapshotTestSuite(t *testing.T) { + suite.Run(t, new(SnapshotTestSuite)) +} + +// Microsoft.Compute/snapshots/{snapshotName} +func 
(testsuite *SnapshotTestSuite) TestSnapshots() { + var err error + // From step Snapshots_CreateOrUpdate + fmt.Println("Call operation: Snapshots_CreateOrUpdate") + snapshotsClient, err := armcompute.NewSnapshotsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + snapshotsClientCreateOrUpdateResponsePoller, err := snapshotsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.snapshotName, armcompute.Snapshot{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.SnapshotProperties{ + CreationData: &armcompute.CreationData{ + CreateOption: to.Ptr(armcompute.DiskCreateOptionEmpty), + }, + DiskSizeGB: to.Ptr[int32](10), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, snapshotsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step Snapshots_List + fmt.Println("Call operation: Snapshots_List") + snapshotsClientNewListPager := snapshotsClient.NewListPager(nil) + for snapshotsClientNewListPager.More() { + _, err := snapshotsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step Snapshots_ListByResourceGroup + fmt.Println("Call operation: Snapshots_ListByResourceGroup") + snapshotsClientNewListByResourceGroupPager := snapshotsClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for snapshotsClientNewListByResourceGroupPager.More() { + _, err := snapshotsClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step Snapshots_Get + fmt.Println("Call operation: Snapshots_Get") + _, err = snapshotsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.snapshotName, nil) + testsuite.Require().NoError(err) + + // From step Snapshots_Update + fmt.Println("Call operation: Snapshots_Update") + snapshotsClientUpdateResponsePoller, err := 
snapshotsClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.snapshotName, armcompute.SnapshotUpdate{ + Properties: &armcompute.SnapshotUpdateProperties{ + DiskSizeGB: to.Ptr[int32](20), + }, + Tags: map[string]*string{ + "department": to.Ptr("Development"), + "project": to.Ptr("UpdateSnapshots"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, snapshotsClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step Snapshots_GrantAccess + fmt.Println("Call operation: Snapshots_GrantAccess") + snapshotsClientGrantAccessResponsePoller, err := snapshotsClient.BeginGrantAccess(testsuite.ctx, testsuite.resourceGroupName, testsuite.snapshotName, armcompute.GrantAccessData{ + Access: to.Ptr(armcompute.AccessLevelRead), + DurationInSeconds: to.Ptr[int32](300), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, snapshotsClientGrantAccessResponsePoller) + testsuite.Require().NoError(err) + + // From step Snapshots_RevokeAccess + fmt.Println("Call operation: Snapshots_RevokeAccess") + snapshotsClientRevokeAccessResponsePoller, err := snapshotsClient.BeginRevokeAccess(testsuite.ctx, testsuite.resourceGroupName, testsuite.snapshotName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, snapshotsClientRevokeAccessResponsePoller) + testsuite.Require().NoError(err) + + // From step Snapshots_Delete + fmt.Println("Call operation: Snapshots_Delete") + snapshotsClientDeleteResponsePoller, err := snapshotsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.snapshotName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, snapshotsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/sshpublickey_live_test.go b/sdk/resourcemanager/compute/armcompute/sshpublickey_live_test.go new file mode 100644 index 
000000000000..3b5dc535b395 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/sshpublickey_live_test.go @@ -0,0 +1,113 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type SshPublicKeyTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + sshPublicKeyName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *SshPublicKeyTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.sshPublicKeyName = testutil.GenerateAlphaNumericID(testsuite.T(), "sshpublick", 6) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + 
testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *SshPublicKeyTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestSshPublicKeyTestSuite(t *testing.T) { + suite.Run(t, new(SshPublicKeyTestSuite)) +} + +// Microsoft.Compute/sshPublicKeys/{sshPublicKeyName} +func (testsuite *SshPublicKeyTestSuite) TestSshPublicKeys() { + var err error + // From step SshPublicKeys_Create + fmt.Println("Call operation: SSHPublicKeys_Create") + sSHPublicKeysClient, err := armcompute.NewSSHPublicKeysClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = sSHPublicKeysClient.Create(testsuite.ctx, testsuite.resourceGroupName, testsuite.sshPublicKeyName, armcompute.SSHPublicKeyResource{ + Location: to.Ptr(testsuite.location), + }, nil) + testsuite.Require().NoError(err) + + // From step SshPublicKeys_ListBySubscription + fmt.Println("Call operation: SSHPublicKeys_ListBySubscription") + sSHPublicKeysClientNewListBySubscriptionPager := sSHPublicKeysClient.NewListBySubscriptionPager(nil) + for sSHPublicKeysClientNewListBySubscriptionPager.More() { + _, err := sSHPublicKeysClientNewListBySubscriptionPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SshPublicKeys_ListByResourceGroup + fmt.Println("Call operation: SSHPublicKeys_ListByResourceGroup") + sSHPublicKeysClientNewListByResourceGroupPager := sSHPublicKeysClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for sSHPublicKeysClientNewListByResourceGroupPager.More() { + _, err := sSHPublicKeysClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SshPublicKeys_Get + fmt.Println("Call 
operation: SSHPublicKeys_Get") + _, err = sSHPublicKeysClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.sshPublicKeyName, nil) + testsuite.Require().NoError(err) + + // From step SshPublicKeys_Update + fmt.Println("Call operation: SSHPublicKeys_Update") + _, err = sSHPublicKeysClient.Update(testsuite.ctx, testsuite.resourceGroupName, testsuite.sshPublicKeyName, armcompute.SSHPublicKeyUpdateResource{ + Tags: map[string]*string{ + "key2854": to.Ptr("a"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step SshPublicKeys_GenerateKeyPair + fmt.Println("Call operation: SSHPublicKeys_GenerateKeyPair") + _, err = sSHPublicKeysClient.GenerateKeyPair(testsuite.ctx, testsuite.resourceGroupName, testsuite.sshPublicKeyName, nil) + testsuite.Require().NoError(err) + + // From step SshPublicKeys_Delete + fmt.Println("Call operation: SSHPublicKeys_Delete") + _, err = sSHPublicKeysClient.Delete(testsuite.ctx, testsuite.resourceGroupName, testsuite.sshPublicKeyName, nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/virtualmachine_live_test.go b/sdk/resourcemanager/compute/armcompute/virtualmachine_live_test.go new file mode 100644 index 000000000000..987606b16747 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/virtualmachine_live_test.go @@ -0,0 +1,348 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/stretchr/testify/suite" +) + +type VirtualMachineTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + adminUsername string + networkInterfaceId string + networkInterfaceName string + virtualNetworksName string + vmName string + adminPassword string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *VirtualMachineTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.adminUsername = testutil.GenerateAlphaNumericID(testsuite.T(), "vmuserna", 6) + testsuite.networkInterfaceName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmnic", 6) + testsuite.virtualNetworksName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmvnet", 6) + testsuite.vmName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmname", 6) + testsuite.adminPassword = testutil.GetEnv("ADMIN_PASSWORD", "") + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, 
testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *VirtualMachineTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestVirtualMachineTestSuite(t *testing.T) { + suite.Run(t, new(VirtualMachineTestSuite)) +} + +func (testsuite *VirtualMachineTestSuite) Prepare() { + var err error + // From step Create_NetworkInterface + template := map[string]any{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "outputs": map[string]any{ + "networkInterfaceId": map[string]any{ + "type": "string", + "value": "[resourceId('Microsoft.Network/networkInterfaces', parameters('networkInterfaceName'))]", + }, + }, + "parameters": map[string]any{ + "location": map[string]any{ + "type": "string", + "defaultValue": testsuite.location, + }, + "networkInterfaceName": map[string]any{ + "type": "string", + "defaultValue": testsuite.networkInterfaceName, + }, + "virtualNetworksName": map[string]any{ + "type": "string", + "defaultValue": testsuite.virtualNetworksName, + }, + }, + "resources": []any{ + map[string]any{ + "name": "[parameters('virtualNetworksName')]", + "type": "Microsoft.Network/virtualNetworks", + "apiVersion": "2021-05-01", + "location": "[parameters('location')]", + "properties": map[string]any{ + "addressSpace": map[string]any{ + "addressPrefixes": []any{ + "10.0.0.0/16", + }, + }, + "subnets": []any{ + map[string]any{ + "name": "default", + "properties": map[string]any{ + "addressPrefix": "10.0.0.0/24", + }, + }, + }, + }, + }, + map[string]any{ + "name": "[parameters('networkInterfaceName')]", + "type": 
"Microsoft.Network/networkInterfaces", + "apiVersion": "2021-08-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks', parameters('virtualNetworksName'))]", + }, + "location": "[parameters('location')]", + "properties": map[string]any{ + "ipConfigurations": []any{ + map[string]any{ + "name": "Ipv4config", + "properties": map[string]any{ + "subnet": map[string]any{ + "id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + }, + }, + }, + }, + }, + }, + } + deployment := armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: template, + Mode: to.Ptr(armresources.DeploymentModeIncremental), + }, + } + deploymentExtend, err := testutil.CreateDeployment(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName, "Create_NetworkInterface", &deployment) + testsuite.Require().NoError(err) + testsuite.networkInterfaceId = deploymentExtend.Properties.Outputs.(map[string]interface{})["networkInterfaceId"].(map[string]interface{})["value"].(string) + + // From step VirtualMachines_CreateOrUpdate + fmt.Println("Call operation: VirtualMachines_CreateOrUpdate") + virtualMachinesClient, err := armcompute.NewVirtualMachinesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachinesClientCreateOrUpdateResponsePoller, err := virtualMachinesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, armcompute.VirtualMachine{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.VirtualMachineProperties{ + HardwareProfile: &armcompute.HardwareProfile{ + VMSize: to.Ptr(armcompute.VirtualMachineSizeTypesStandardD1V2), + }, + NetworkProfile: &armcompute.NetworkProfile{ + NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ + { + ID: to.Ptr(testsuite.networkInterfaceId), + Properties: 
&armcompute.NetworkInterfaceReferenceProperties{ + Primary: to.Ptr(true), + }, + }}, + }, + OSProfile: &armcompute.OSProfile{ + AdminPassword: to.Ptr(testsuite.adminPassword), + AdminUsername: to.Ptr(testsuite.adminUsername), + ComputerName: to.Ptr(testsuite.vmName), + }, + StorageProfile: &armcompute.StorageProfile{ + ImageReference: &armcompute.ImageReference{ + Offer: to.Ptr("WindowsServer"), + Publisher: to.Ptr("MicrosoftWindowsServer"), + SKU: to.Ptr("2016-Datacenter"), + Version: to.Ptr("latest"), + }, + OSDisk: &armcompute.OSDisk{ + Name: to.Ptr(testsuite.vmName + "osdisk"), + Caching: to.Ptr(armcompute.CachingTypesReadWrite), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), + ManagedDisk: &armcompute.ManagedDiskParameters{ + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), + }, + }, + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/virtualMachines/{vmName} +func (testsuite *VirtualMachineTestSuite) TestVirtualMachines() { + var err error + // From step VirtualMachines_ListByLocation + fmt.Println("Call operation: VirtualMachines_ListByLocation") + virtualMachinesClient, err := armcompute.NewVirtualMachinesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachinesClientNewListByLocationPager := virtualMachinesClient.NewListByLocationPager(testsuite.location, nil) + for virtualMachinesClientNewListByLocationPager.More() { + _, err := virtualMachinesClientNewListByLocationPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachines_InstanceView + fmt.Println("Call operation: VirtualMachines_InstanceView") + _, err = virtualMachinesClient.InstanceView(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, nil) + testsuite.Require().NoError(err) 
+ + // From step VirtualMachines_ListAll + fmt.Println("Call operation: VirtualMachines_ListAll") + virtualMachinesClientNewListAllPager := virtualMachinesClient.NewListAllPager(&armcompute.VirtualMachinesClientListAllOptions{StatusOnly: nil, + Filter: nil, + }) + for virtualMachinesClientNewListAllPager.More() { + _, err := virtualMachinesClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachines_List + fmt.Println("Call operation: VirtualMachines_List") + virtualMachinesClientNewListPager := virtualMachinesClient.NewListPager(testsuite.resourceGroupName, &armcompute.VirtualMachinesClientListOptions{Filter: nil}) + for virtualMachinesClientNewListPager.More() { + _, err := virtualMachinesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachines_Get + fmt.Println("Call operation: VirtualMachines_Get") + _, err = virtualMachinesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, &armcompute.VirtualMachinesClientGetOptions{Expand: to.Ptr(armcompute.InstanceViewTypesUserData)}) + testsuite.Require().NoError(err) + + // From step VirtualMachines_ListAvailableSizes + fmt.Println("Call operation: VirtualMachines_ListAvailableSizes") + virtualMachinesClientNewListAvailableSizesPager := virtualMachinesClient.NewListAvailableSizesPager(testsuite.resourceGroupName, testsuite.vmName, nil) + for virtualMachinesClientNewListAvailableSizesPager.More() { + _, err := virtualMachinesClientNewListAvailableSizesPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachines_Update + fmt.Println("Call operation: VirtualMachines_Update") + virtualMachinesClientUpdateResponsePoller, err := virtualMachinesClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, armcompute.VirtualMachineUpdate{ + Tags: map[string]*string{ + "virtaulMachine": to.Ptr("vmupdate"), + }, + 
}, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_InstallPatches + fmt.Println("Call operation: VirtualMachines_InstallPatches") + virtualMachinesClientInstallPatchesResponsePoller, err := virtualMachinesClient.BeginInstallPatches(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, armcompute.VirtualMachineInstallPatchesParameters{ + MaximumDuration: to.Ptr("PT4H"), + RebootSetting: to.Ptr(armcompute.VMGuestPatchRebootSettingIfRequired), + WindowsParameters: &armcompute.WindowsParameters{ + ClassificationsToInclude: []*armcompute.VMGuestPatchClassificationWindows{ + to.Ptr(armcompute.VMGuestPatchClassificationWindowsCritical), + to.Ptr(armcompute.VMGuestPatchClassificationWindowsSecurity)}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientInstallPatchesResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_Deallocate + fmt.Println("Call operation: VirtualMachines_Deallocate") + virtualMachinesClientDeallocateResponsePoller, err := virtualMachinesClient.BeginDeallocate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, &armcompute.VirtualMachinesClientBeginDeallocateOptions{Hibernate: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientDeallocateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_Start + fmt.Println("Call operation: VirtualMachines_Start") + virtualMachinesClientStartResponsePoller, err := virtualMachinesClient.BeginStart(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientStartResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_AssessPatches + 
fmt.Println("Call operation: VirtualMachines_AssessPatches") + virtualMachinesClientAssessPatchesResponsePoller, err := virtualMachinesClient.BeginAssessPatches(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientAssessPatchesResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_Restart + fmt.Println("Call operation: VirtualMachines_Restart") + virtualMachinesClientRestartResponsePoller, err := virtualMachinesClient.BeginRestart(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientRestartResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_Reapply + fmt.Println("Call operation: VirtualMachines_Reapply") + virtualMachinesClientReapplyResponsePoller, err := virtualMachinesClient.BeginReapply(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientReapplyResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_RunCommand + fmt.Println("Call operation: VirtualMachines_RunCommand") + virtualMachinesClientRunCommandResponsePoller, err := virtualMachinesClient.BeginRunCommand(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, armcompute.RunCommandInput{ + CommandID: to.Ptr("RunPowerShellScript"), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientRunCommandResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_Redeploy + fmt.Println("Call operation: VirtualMachines_Redeploy") + virtualMachinesClientRedeployResponsePoller, err := virtualMachinesClient.BeginRedeploy(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, nil) + 
testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientRedeployResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachines_PowerOff + fmt.Println("Call operation: VirtualMachines_PowerOff") + virtualMachinesClientPowerOffResponsePoller, err := virtualMachinesClient.BeginPowerOff(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, &armcompute.VirtualMachinesClientBeginPowerOffOptions{SkipShutdown: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientPowerOffResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *VirtualMachineTestSuite) Cleanup() { + var err error + // From step VirtualMachines_Delete + fmt.Println("Call operation: VirtualMachines_Delete") + virtualMachinesClient, err := armcompute.NewVirtualMachinesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachinesClientDeleteResponsePoller, err := virtualMachinesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmName, &armcompute.VirtualMachinesClientBeginDeleteOptions{ForceDeletion: to.Ptr(true)}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachinesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/virtualmachineextensionimage_live_test.go b/sdk/resourcemanager/compute/armcompute/virtualmachineextensionimage_live_test.go new file mode 100644 index 000000000000..c67c58321675 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/virtualmachineextensionimage_live_test.go @@ -0,0 +1,82 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type VirtualMachineExtensionImageTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *VirtualMachineExtensionImageTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *VirtualMachineExtensionImageTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestVirtualMachineExtensionImageTestSuite(t *testing.T) { + suite.Run(t, new(VirtualMachineExtensionImageTestSuite)) +} + +// 
Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types +func (testsuite *VirtualMachineExtensionImageTestSuite) TestVirtualMachineExtensionImages() { + publisherName := "Microsoft.Compute" + typeParam := "CustomScriptExtension" + version := "1.9" + var err error + // From step VirtualMachineExtensionImages_ListTypes + fmt.Println("Call operation: VirtualMachineExtensionImages_ListTypes") + virtualMachineExtensionImagesClient, err := armcompute.NewVirtualMachineExtensionImagesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = virtualMachineExtensionImagesClient.ListTypes(testsuite.ctx, testsuite.location, publisherName, nil) + testsuite.Require().NoError(err) + + // From step VirtualMachineExtensionImages_ListVersions + fmt.Println("Call operation: VirtualMachineExtensionImages_ListVersions") + _, err = virtualMachineExtensionImagesClient.ListVersions(testsuite.ctx, testsuite.location, publisherName, typeParam, &armcompute.VirtualMachineExtensionImagesClientListVersionsOptions{Filter: nil, + Top: nil, + Orderby: nil, + }) + testsuite.Require().NoError(err) + + // From step VirtualMachineExtensionImages_Get + fmt.Println("Call operation: VirtualMachineExtensionImages_Get") + _, err = virtualMachineExtensionImagesClient.Get(testsuite.ctx, testsuite.location, publisherName, typeParam, version, nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/virtualmachineimage_live_test.go b/sdk/resourcemanager/compute/armcompute/virtualmachineimage_live_test.go new file mode 100644 index 000000000000..8d2d799af082 --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/virtualmachineimage_live_test.go @@ -0,0 +1,93 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type VirtualMachineImageTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *VirtualMachineImageTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *VirtualMachineImageTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestVirtualMachineImageTestSuite(t *testing.T) { + suite.Run(t, new(VirtualMachineImageTestSuite)) +} + +// 
Microsoft.Compute/locations/publishers +func (testsuite *VirtualMachineImageTestSuite) TestVirtualMachineImages() { + offer := "office-365" + publisherName := "MicrosoftWindowsDesktop" + skus := "win11-22h2-avd-m365" + version := "22621.1105.230110" + var err error + // From step VirtualMachineImages_ListPublishers + fmt.Println("Call operation: VirtualMachineImages_ListPublishers") + virtualMachineImagesClient, err := armcompute.NewVirtualMachineImagesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = virtualMachineImagesClient.ListPublishers(testsuite.ctx, testsuite.location, nil) + testsuite.Require().NoError(err) + + // From step VirtualMachineImages_ListOffers + fmt.Println("Call operation: VirtualMachineImages_ListOffers") + _, err = virtualMachineImagesClient.ListOffers(testsuite.ctx, testsuite.location, publisherName, nil) + testsuite.Require().NoError(err) + + // From step VirtualMachineImages_ListSkus + fmt.Println("Call operation: VirtualMachineImages_ListSKUs") + _, err = virtualMachineImagesClient.ListSKUs(testsuite.ctx, testsuite.location, publisherName, offer, nil) + testsuite.Require().NoError(err) + + // From step VirtualMachineImages_List + fmt.Println("Call operation: VirtualMachineImages_List") + _, err = virtualMachineImagesClient.List(testsuite.ctx, testsuite.location, publisherName, offer, skus, &armcompute.VirtualMachineImagesClientListOptions{Expand: nil, + Top: nil, + Orderby: nil, + }) + testsuite.Require().NoError(err) + + // From step VirtualMachineImages_Get + fmt.Println("Call operation: VirtualMachineImages_Get") + _, err = virtualMachineImagesClient.Get(testsuite.ctx, testsuite.location, publisherName, offer, skus, version, nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/virtualmachines_client_live_test.go b/sdk/resourcemanager/compute/armcompute/virtualmachines_client_live_test.go deleted file mode 100644 index 
01d54b7ab595..000000000000 --- a/sdk/resourcemanager/compute/armcompute/virtualmachines_client_live_test.go +++ /dev/null @@ -1,312 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. - -package armcompute_test - -import ( - "context" - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork" - "github.com/stretchr/testify/suite" -) - -type VirtualMachinesClientTestSuite struct { - suite.Suite - - ctx context.Context - cred azcore.TokenCredential - options *arm.ClientOptions - location string - resourceGroupName string - subscriptionID string -} - -func (testsuite *VirtualMachinesClientTestSuite) SetupSuite() { - testsuite.ctx = context.Background() - testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) - testsuite.location = testutil.GetEnv("LOCATION", "eastus") - testsuite.subscriptionID = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") - resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.location) - testsuite.Require().NoError(err) - testsuite.resourceGroupName = *resourceGroup.Name -} - -func (testsuite *VirtualMachinesClientTestSuite) TearDownSuite() { - _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, 
testsuite.resourceGroupName) - testsuite.Require().NoError(err) - testutil.StopRecording(testsuite.T()) -} - -func TestVirtualMachinesClient(t *testing.T) { - suite.Run(t, new(VirtualMachinesClientTestSuite)) -} - -func (testsuite *VirtualMachinesClientTestSuite) TestVirtualMachineCRUD() { - // create virtual network - vnClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - vnName := "go-test-network" - vnPoller, err := vnClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vnName, - armnetwork.VirtualNetwork{ - Location: to.Ptr(testsuite.location), - Properties: &armnetwork.VirtualNetworkPropertiesFormat{ - AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ - to.Ptr("10.1.0.0/16"), - }, - }, - }, - }, - nil, - ) - testsuite.Require().NoError(err) - vnResp, err := testutil.PollForTest(testsuite.ctx, vnPoller) - testsuite.Require().NoError(err) - testsuite.Require().Equal(*vnResp.Name, vnName) - - // create subnet - subClient, err := armnetwork.NewSubnetsClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - subName := "go-test-subnet" - subPoller, err := subClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vnName, - subName, - armnetwork.Subnet{ - Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: to.Ptr("10.1.10.0/24"), - }, - }, - nil, - ) - testsuite.Require().NoError(err) - subResp, err := testutil.PollForTest(testsuite.ctx, subPoller) - testsuite.Require().NoError(err) - subnetID := *subResp.ID - testsuite.Require().Equal(*subResp.Name, subName) - - // create public ip address - ipClient, err := armnetwork.NewPublicIPAddressesClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - ipName := "go-test-ip" - testsuite.Require().NoError(err) - ipPoller, err := ipClient.BeginCreateOrUpdate( 
- testsuite.ctx, - testsuite.resourceGroupName, - ipName, - armnetwork.PublicIPAddress{ - Location: to.Ptr(testsuite.location), - Properties: &armnetwork.PublicIPAddressPropertiesFormat{ - PublicIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodStatic), // Static or Dynamic - }, - }, - nil, - ) - testsuite.Require().NoError(err) - ipResp, err := testutil.PollForTest(testsuite.ctx, ipPoller) - testsuite.Require().NoError(err) - publicIPAddressID := *ipResp.ID - testsuite.Require().Equal(*ipResp.Name, ipName) - - // create network security group - nsgClient, err := armnetwork.NewSecurityGroupsClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - nsgName := "go-test-nsg" - testsuite.Require().NoError(err) - nsgPoller, err := nsgClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - nsgName, - armnetwork.SecurityGroup{ - Location: to.Ptr(testsuite.location), - Properties: &armnetwork.SecurityGroupPropertiesFormat{ - SecurityRules: []*armnetwork.SecurityRule{ - { - Name: to.Ptr("sample_inbound_22"), - Properties: &armnetwork.SecurityRulePropertiesFormat{ - SourceAddressPrefix: to.Ptr("0.0.0.0/0"), - SourcePortRange: to.Ptr("*"), - DestinationAddressPrefix: to.Ptr("0.0.0.0/0"), - DestinationPortRange: to.Ptr("22"), - Protocol: to.Ptr(armnetwork.SecurityRuleProtocolTCP), - Access: to.Ptr(armnetwork.SecurityRuleAccessAllow), - Priority: to.Ptr[int32](100), - Description: to.Ptr("sample network security group inbound port 22"), - Direction: to.Ptr(armnetwork.SecurityRuleDirectionInbound), - }, - }, - // outbound - { - Name: to.Ptr("sample_outbound_22"), - Properties: &armnetwork.SecurityRulePropertiesFormat{ - SourceAddressPrefix: to.Ptr("0.0.0.0/0"), - SourcePortRange: to.Ptr("*"), - DestinationAddressPrefix: to.Ptr("0.0.0.0/0"), - DestinationPortRange: to.Ptr("22"), - Protocol: to.Ptr(armnetwork.SecurityRuleProtocolTCP), - Access: to.Ptr(armnetwork.SecurityRuleAccessAllow), - Priority: 
to.Ptr[int32](100), - Description: to.Ptr("sample network security group outbound port 22"), - Direction: to.Ptr(armnetwork.SecurityRuleDirectionOutbound), - }, - }, - }, - }, - }, - nil, - ) - testsuite.Require().NoError(err) - nsgResp, err := testutil.PollForTest(testsuite.ctx, nsgPoller) - testsuite.Require().NoError(err) - networkSecurityGroupID := *nsgResp.ID - testsuite.Require().Equal(*nsgResp.Name, nsgName) - - // create network interface - nicClient, err := armnetwork.NewInterfacesClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - nicName := "go-test-nic" - testsuite.Require().NoError(err) - nicPoller, err := nicClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - nicName, - armnetwork.Interface{ - Location: to.Ptr(testsuite.location), - Properties: &armnetwork.InterfacePropertiesFormat{ - //NetworkSecurityGroup: - IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ - { - Name: to.Ptr("ipConfig"), - Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ - PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), - Subnet: &armnetwork.Subnet{ - ID: to.Ptr(subnetID), - }, - PublicIPAddress: &armnetwork.PublicIPAddress{ - ID: to.Ptr(publicIPAddressID), - }, - }, - }, - }, - NetworkSecurityGroup: &armnetwork.SecurityGroup{ - ID: to.Ptr(networkSecurityGroupID), - }, - }, - }, - nil, - ) - testsuite.Require().NoError(err) - nicResp, err := testutil.PollForTest(testsuite.ctx, nicPoller) - testsuite.Require().NoError(err) - networkInterfaceID := *nicResp.ID - testsuite.Require().Equal(*nicResp.Name, nicName) - - // create virtual machine - fmt.Println("Call operation: VirtualMachines_CreateOrUpdate") - vmClient, err := armcompute.NewVirtualMachinesClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - vmName := "go-test-vm" - testsuite.Require().NoError(err) - diskName := "go-test-disk" - 
testsuite.Require().NoError(err) - vmPoller, err := vmClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vmName, - armcompute.VirtualMachine{ - Location: to.Ptr(testsuite.location), - Identity: &armcompute.VirtualMachineIdentity{ - Type: to.Ptr(armcompute.ResourceIdentityTypeNone), - }, - Properties: &armcompute.VirtualMachineProperties{ - StorageProfile: &armcompute.StorageProfile{ - ImageReference: &armcompute.ImageReference{ - Offer: to.Ptr("WindowsServer"), - Publisher: to.Ptr("MicrosoftWindowsServer"), - SKU: to.Ptr("2019-Datacenter"), - Version: to.Ptr("latest"), - }, - OSDisk: &armcompute.OSDisk{ - Name: to.Ptr(diskName), - CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), - Caching: to.Ptr(armcompute.CachingTypesReadWrite), - ManagedDisk: &armcompute.ManagedDiskParameters{ - StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), // OSDisk type Standard/Premium HDD/SSD - }, - }, - }, - HardwareProfile: &armcompute.HardwareProfile{ - VMSize: to.Ptr(armcompute.VirtualMachineSizeTypesStandardF2S), // VM size include vCPUs,RAM,Data Disks,Temp storage. 
- }, - OSProfile: &armcompute.OSProfile{ - ComputerName: to.Ptr("sample-compute"), - AdminUsername: to.Ptr("sample-user"), - AdminPassword: to.Ptr("Password01!@#"), - }, - NetworkProfile: &armcompute.NetworkProfile{ - NetworkInterfaces: []*armcompute.NetworkInterfaceReference{ - { - ID: to.Ptr(networkInterfaceID), - }, - }, - }, - }, - }, - nil, - ) - testsuite.Require().NoError(err) - vmResp, err := testutil.PollForTest(testsuite.ctx, vmPoller) - testsuite.Require().NoError(err) - testsuite.Require().Equal(*vmResp.Name, vmName) - - // virtual machine update - fmt.Println("Call operation: VirtualMachines_Update") - updatePoller, err := vmClient.BeginUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vmName, - armcompute.VirtualMachineUpdate{ - Tags: map[string]*string{ - "tag": to.Ptr("value"), - }, - }, - nil, - ) - testsuite.Require().NoError(err) - updateResp, err := testutil.PollForTest(testsuite.ctx, updatePoller) - testsuite.Require().NoError(err) - testsuite.Require().Equal(*updateResp.Name, vmName) - - // virtual machine get - fmt.Println("Call operation: VirtualMachines_Get") - resp, err := vmClient.Get(testsuite.ctx, testsuite.resourceGroupName, vmName, nil) - testsuite.Require().NoError(err) - testsuite.Require().Equal(*resp.Name, vmName) - - // virtual machine list - fmt.Println("Call operation: VirtualMachines_List") - vmList := vmClient.NewListPager(testsuite.resourceGroupName, nil) - testsuite.Require().Equal(vmList.More(), true) - - // delete virtual machine - fmt.Println("Call operation: VirtualMachines_Delete") - delPoller, err := vmClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, vmName, nil) - testsuite.Require().NoError(err) - _, err = testutil.PollForTest(testsuite.ctx, delPoller) - testsuite.Require().NoError(err) -} diff --git a/sdk/resourcemanager/compute/armcompute/virtualmachinescaleset_live_test.go b/sdk/resourcemanager/compute/armcompute/virtualmachinescaleset_live_test.go new file mode 100644 index 
000000000000..d2e907027f7f --- /dev/null +++ b/sdk/resourcemanager/compute/armcompute/virtualmachinescaleset_live_test.go @@ -0,0 +1,399 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armcompute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/stretchr/testify/suite" +) + +type VirtualMachineScaleSetTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + adminUsername string + subnetId string + virtualNetworkSubnetName string + vmScaleSetName string + vmssExtensionName string + adminPassword string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *VirtualMachineScaleSetTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.adminUsername = testutil.GenerateAlphaNumericID(testsuite.T(), "vmuserna", 6) + testsuite.virtualNetworkSubnetName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmssvnetna", 6) + testsuite.vmScaleSetName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmscaleset", 6) + 
testsuite.vmssExtensionName = testutil.GenerateAlphaNumericID(testsuite.T(), "vmssextens", 6) + testsuite.adminPassword = testutil.GetEnv("ADMIN_PASSWORD", "") + testsuite.location = testutil.GetEnv("LOCATION", "eastus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *VirtualMachineScaleSetTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestVirtualMachineScaleSetTestSuite(t *testing.T) { + suite.Run(t, new(VirtualMachineScaleSetTestSuite)) +} + +func (testsuite *VirtualMachineScaleSetTestSuite) Prepare() { + var err error + // From step Create_NetworkAndSubnet + template := map[string]any{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "outputs": map[string]any{ + "subnetId": map[string]any{ + "type": "string", + "value": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworkSubnetName'), 'default')]", + }, + }, + "parameters": map[string]any{ + "location": map[string]any{ + "type": "string", + "defaultValue": testsuite.location, + }, + "virtualNetworkSubnetName": map[string]any{ + "type": "string", + "defaultValue": testsuite.virtualNetworkSubnetName, + }, + }, + "resources": []any{ + map[string]any{ + "name": "[parameters('virtualNetworkSubnetName')]", + "type": 
"Microsoft.Network/virtualNetworks", + "apiVersion": "2021-05-01", + "location": "[parameters('location')]", + "properties": map[string]any{ + "addressSpace": map[string]any{ + "addressPrefixes": []any{ + "10.0.0.0/16", + }, + }, + "subnets": []any{ + map[string]any{ + "name": "default", + "properties": map[string]any{ + "addressPrefix": "10.0.0.0/24", + }, + }, + }, + }, + }, + }, + } + deployment := armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: template, + Mode: to.Ptr(armresources.DeploymentModeIncremental), + }, + } + deploymentExtend, err := testutil.CreateDeployment(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName, "Create_NetworkAndSubnet", &deployment) + testsuite.Require().NoError(err) + testsuite.subnetId = deploymentExtend.Properties.Outputs.(map[string]interface{})["subnetId"].(map[string]interface{})["value"].(string) + + // From step VirtualMachineScaleSets_CreateOrUpdate + fmt.Println("Call operation: VirtualMachineScaleSets_CreateOrUpdate") + virtualMachineScaleSetsClient, err := armcompute.NewVirtualMachineScaleSetsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachineScaleSetsClientCreateOrUpdateResponsePoller, err := virtualMachineScaleSetsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, armcompute.VirtualMachineScaleSet{ + Location: to.Ptr(testsuite.location), + Properties: &armcompute.VirtualMachineScaleSetProperties{ + Overprovision: to.Ptr(true), + UpgradePolicy: &armcompute.UpgradePolicy{ + Mode: to.Ptr(armcompute.UpgradeModeManual), + }, + VirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{ + NetworkProfile: &armcompute.VirtualMachineScaleSetNetworkProfile{ + NetworkInterfaceConfigurations: []*armcompute.VirtualMachineScaleSetNetworkConfiguration{ + { + Name: to.Ptr(testsuite.vmScaleSetName), + Properties: 
&armcompute.VirtualMachineScaleSetNetworkConfigurationProperties{ + EnableIPForwarding: to.Ptr(true), + IPConfigurations: []*armcompute.VirtualMachineScaleSetIPConfiguration{ + { + Name: to.Ptr(testsuite.vmScaleSetName), + Properties: &armcompute.VirtualMachineScaleSetIPConfigurationProperties{ + Subnet: &armcompute.APIEntityReference{ + ID: to.Ptr(testsuite.subnetId), + }, + }, + }}, + Primary: to.Ptr(true), + }, + }}, + }, + OSProfile: &armcompute.VirtualMachineScaleSetOSProfile{ + AdminPassword: to.Ptr(testsuite.adminPassword), + AdminUsername: to.Ptr(testsuite.adminUsername), + ComputerNamePrefix: to.Ptr("vmss"), + }, + StorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{ + ImageReference: &armcompute.ImageReference{ + Offer: to.Ptr("WindowsServer"), + Publisher: to.Ptr("MicrosoftWindowsServer"), + SKU: to.Ptr("2016-Datacenter"), + Version: to.Ptr("latest"), + }, + OSDisk: &armcompute.VirtualMachineScaleSetOSDisk{ + Caching: to.Ptr(armcompute.CachingTypesReadWrite), + CreateOption: to.Ptr(armcompute.DiskCreateOptionTypesFromImage), + ManagedDisk: &armcompute.VirtualMachineScaleSetManagedDiskParameters{ + StorageAccountType: to.Ptr(armcompute.StorageAccountTypesStandardLRS), + }, + }, + }, + }, + }, + SKU: &armcompute.SKU{ + Name: to.Ptr("Standard_D1_v2"), + Capacity: to.Ptr[int64](3), + Tier: to.Ptr("Standard"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName} +func (testsuite *VirtualMachineScaleSetTestSuite) TestVirtualMachineScaleSets() { + var err error + // From step VirtualMachineScaleSets_ListByLocation + fmt.Println("Call operation: VirtualMachineScaleSets_ListByLocation") + virtualMachineScaleSetsClient, err := armcompute.NewVirtualMachineScaleSetsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + 
testsuite.Require().NoError(err) + virtualMachineScaleSetsClientNewListByLocationPager := virtualMachineScaleSetsClient.NewListByLocationPager(testsuite.location, nil) + for virtualMachineScaleSetsClientNewListByLocationPager.More() { + _, err := virtualMachineScaleSetsClientNewListByLocationPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachineScaleSets_GetInstanceView + fmt.Println("Call operation: VirtualMachineScaleSets_GetInstanceView") + _, err = virtualMachineScaleSetsClient.GetInstanceView(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, nil) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSets_GetOSUpgradeHistory + fmt.Println("Call operation: VirtualMachineScaleSets_GetOSUpgradeHistory") + virtualMachineScaleSetsClientNewGetOSUpgradeHistoryPager := virtualMachineScaleSetsClient.NewGetOSUpgradeHistoryPager(testsuite.resourceGroupName, testsuite.vmScaleSetName, nil) + for virtualMachineScaleSetsClientNewGetOSUpgradeHistoryPager.More() { + _, err := virtualMachineScaleSetsClientNewGetOSUpgradeHistoryPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachineScaleSets_ListAll + fmt.Println("Call operation: VirtualMachineScaleSets_ListAll") + virtualMachineScaleSetsClientNewListAllPager := virtualMachineScaleSetsClient.NewListAllPager(nil) + for virtualMachineScaleSetsClientNewListAllPager.More() { + _, err := virtualMachineScaleSetsClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachineScaleSets_Get + fmt.Println("Call operation: VirtualMachineScaleSets_Get") + _, err = virtualMachineScaleSetsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSets_List + fmt.Println("Call operation: 
VirtualMachineScaleSets_List") + virtualMachineScaleSetsClientNewListPager := virtualMachineScaleSetsClient.NewListPager(testsuite.resourceGroupName, nil) + for virtualMachineScaleSetsClientNewListPager.More() { + _, err := virtualMachineScaleSetsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachineScaleSets_ListSkus + fmt.Println("Call operation: VirtualMachineScaleSets_ListSKUs") + virtualMachineScaleSetsClientNewListSKUsPager := virtualMachineScaleSetsClient.NewListSKUsPager(testsuite.resourceGroupName, testsuite.vmScaleSetName, nil) + for virtualMachineScaleSetsClientNewListSKUsPager.More() { + _, err := virtualMachineScaleSetsClientNewListSKUsPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachineScaleSets_Update + fmt.Println("Call operation: VirtualMachineScaleSets_Update") + virtualMachineScaleSetsClientUpdateResponsePoller, err := virtualMachineScaleSetsClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, armcompute.VirtualMachineScaleSetUpdate{}, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSets_Redeploy + fmt.Println("Call operation: VirtualMachineScaleSets_Redeploy") + virtualMachineScaleSetsClientRedeployResponsePoller, err := virtualMachineScaleSetsClient.BeginRedeploy(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetsClientBeginRedeployOptions{VMInstanceIDs: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientRedeployResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSets_Deallocate + fmt.Println("Call operation: VirtualMachineScaleSets_Deallocate") + 
virtualMachineScaleSetsClientDeallocateResponsePoller, err := virtualMachineScaleSetsClient.BeginDeallocate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetsClientBeginDeallocateOptions{VMInstanceIDs: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientDeallocateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSets_Start + fmt.Println("Call operation: VirtualMachineScaleSets_Start") + virtualMachineScaleSetsClientStartResponsePoller, err := virtualMachineScaleSetsClient.BeginStart(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetsClientBeginStartOptions{VMInstanceIDs: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientStartResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSets_Reimage + fmt.Println("Call operation: VirtualMachineScaleSets_Reimage") + virtualMachineScaleSetsClientReimageResponsePoller, err := virtualMachineScaleSetsClient.BeginReimage(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetsClientBeginReimageOptions{VMScaleSetReimageInput: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientReimageResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSets_Restart + fmt.Println("Call operation: VirtualMachineScaleSets_Restart") + virtualMachineScaleSetsClientRestartResponsePoller, err := virtualMachineScaleSetsClient.BeginRestart(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetsClientBeginRestartOptions{VMInstanceIDs: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, 
virtualMachineScaleSetsClientRestartResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSets_ReimageAll + fmt.Println("Call operation: VirtualMachineScaleSets_ReimageAll") + virtualMachineScaleSetsClientReimageAllResponsePoller, err := virtualMachineScaleSetsClient.BeginReimageAll(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetsClientBeginReimageAllOptions{VMInstanceIDs: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientReimageAllResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualMachines/{instanceId} +func (testsuite *VirtualMachineScaleSetTestSuite) TestVirtualMachineScaleSetVMs() { + instanceId := "0" + var err error + // From step VirtualMachineScaleSetVMs_List + fmt.Println("Call operation: VirtualMachineScaleSetVMs_List") + virtualMachineScaleSetVMsClient, err := armcompute.NewVirtualMachineScaleSetVMsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachineScaleSetVMsClientNewListPager := virtualMachineScaleSetVMsClient.NewListPager(testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetVMsClientListOptions{Filter: nil, + Select: nil, + Expand: nil, + }) + for virtualMachineScaleSetVMsClientNewListPager.More() { + _, err := virtualMachineScaleSetVMsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualMachineScaleSetVMs_Get + fmt.Println("Call operation: VirtualMachineScaleSetVMs_Get") + _, err = virtualMachineScaleSetVMsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, instanceId, &armcompute.VirtualMachineScaleSetVMsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMs_GetInstanceView + 
fmt.Println("Call operation: VirtualMachineScaleSetVMs_GetInstanceView") + _, err = virtualMachineScaleSetVMsClient.GetInstanceView(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, instanceId, nil) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMs_Redeploy + fmt.Println("Call operation: VirtualMachineScaleSetVMs_Redeploy") + virtualMachineScaleSetVMsClientRedeployResponsePoller, err := virtualMachineScaleSetVMsClient.BeginRedeploy(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, instanceId, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMsClientRedeployResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMs_Start + fmt.Println("Call operation: VirtualMachineScaleSetVMs_Start") + virtualMachineScaleSetVMsClientStartResponsePoller, err := virtualMachineScaleSetVMsClient.BeginStart(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, instanceId, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMsClientStartResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMs_Restart + fmt.Println("Call operation: VirtualMachineScaleSetVMs_Restart") + virtualMachineScaleSetVMsClientRestartResponsePoller, err := virtualMachineScaleSetVMsClient.BeginRestart(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, instanceId, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMsClientRestartResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMs_Deallocate + fmt.Println("Call operation: VirtualMachineScaleSetVMs_Deallocate") + virtualMachineScaleSetVMsClientDeallocateResponsePoller, err := virtualMachineScaleSetVMsClient.BeginDeallocate(testsuite.ctx, testsuite.resourceGroupName, 
testsuite.vmScaleSetName, instanceId, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMsClientDeallocateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMs_Reimage + fmt.Println("Call operation: VirtualMachineScaleSetVMs_Reimage") + virtualMachineScaleSetVMsClientReimageResponsePoller, err := virtualMachineScaleSetVMsClient.BeginReimage(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, instanceId, &armcompute.VirtualMachineScaleSetVMsClientBeginReimageOptions{VMScaleSetVMReimageInput: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMsClientReimageResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMs_PowerOff + fmt.Println("Call operation: VirtualMachineScaleSetVMs_PowerOff") + virtualMachineScaleSetVMsClientPowerOffResponsePoller, err := virtualMachineScaleSetVMsClient.BeginPowerOff(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, instanceId, &armcompute.VirtualMachineScaleSetVMsClientBeginPowerOffOptions{SkipShutdown: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMsClientPowerOffResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSetVMs_Delete + fmt.Println("Call operation: VirtualMachineScaleSetVMs_Delete") + virtualMachineScaleSetVMsClientDeleteResponsePoller, err := virtualMachineScaleSetVMsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, instanceId, &armcompute.VirtualMachineScaleSetVMsClientBeginDeleteOptions{ForceDeletion: to.Ptr(true)}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetVMsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *VirtualMachineScaleSetTestSuite) Cleanup() { + var 
err error + // From step VirtualMachineScaleSets_PowerOff + fmt.Println("Call operation: VirtualMachineScaleSets_PowerOff") + virtualMachineScaleSetsClient, err := armcompute.NewVirtualMachineScaleSetsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualMachineScaleSetsClientPowerOffResponsePoller, err := virtualMachineScaleSetsClient.BeginPowerOff(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetsClientBeginPowerOffOptions{SkipShutdown: nil, + VMInstanceIDs: nil, + }) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientPowerOffResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualMachineScaleSets_Delete + fmt.Println("Call operation: VirtualMachineScaleSets_Delete") + virtualMachineScaleSetsClientDeleteResponsePoller, err := virtualMachineScaleSetsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.vmScaleSetName, &armcompute.VirtualMachineScaleSetsClientBeginDeleteOptions{ForceDeletion: to.Ptr(true)}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualMachineScaleSetsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/compute/armcompute/virtualmachinescalesets_client_live_test.go b/sdk/resourcemanager/compute/armcompute/virtualmachinescalesets_client_live_test.go deleted file mode 100644 index e791fbfeef4e..000000000000 --- a/sdk/resourcemanager/compute/armcompute/virtualmachinescalesets_client_live_test.go +++ /dev/null @@ -1,195 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package armcompute_test - -import ( - "context" - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork" - "github.com/stretchr/testify/suite" -) - -type VirtualMachineScaleSetsClientTestSuite struct { - suite.Suite - - ctx context.Context - cred azcore.TokenCredential - options *arm.ClientOptions - location string - resourceGroupName string - subscriptionID string -} - -func (testsuite *VirtualMachineScaleSetsClientTestSuite) SetupSuite() { - testsuite.ctx = context.Background() - testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) - testsuite.location = testutil.GetEnv("LOCATION", "eastus") - testsuite.subscriptionID = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/compute/armcompute/testdata") - resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.location) - testsuite.Require().NoError(err) - testsuite.resourceGroupName = *resourceGroup.Name -} - -func (testsuite *VirtualMachineScaleSetsClientTestSuite) TearDownSuite() { - _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.resourceGroupName) - testsuite.Require().NoError(err) - testutil.StopRecording(testsuite.T()) -} - -func TestVirtualMachineScaleSetsClient(t *testing.T) { - suite.Run(t, new(VirtualMachineScaleSetsClientTestSuite)) -} - -func (testsuite *VirtualMachineScaleSetsClientTestSuite) 
TestVirtualMachineScaleSetsCRUD() { - // create virtual network and subnet - vnClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - vnName := "go-test-network" - subName := "go-test-subnet" - vnPoller, err := vnClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vnName, - armnetwork.VirtualNetwork{ - Location: to.Ptr(testsuite.location), - Properties: &armnetwork.VirtualNetworkPropertiesFormat{ - AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ - to.Ptr("10.1.0.0/16"), - }, - }, - Subnets: []*armnetwork.Subnet{ - { - Name: to.Ptr(subName), - Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: to.Ptr("10.1.0.0/24"), - }, - }, - }, - }, - }, - nil, - ) - testsuite.Require().NoError(err) - vnResp, err := testutil.PollForTest(testsuite.ctx, vnPoller) - testsuite.Require().NoError(err) - testsuite.Require().Equal(vnName, *vnResp.Name) - - // create virtual machine scale set - fmt.Println("Call operation: VirtualMachineScaleSets_CreateOrUpdate") - vmssClient, err := armcompute.NewVirtualMachineScaleSetsClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - vmssName := "go-test-vmss" - vmssPoller, err := vmssClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vmssName, - armcompute.VirtualMachineScaleSet{ - Location: to.Ptr(testsuite.location), - SKU: &armcompute.SKU{ - //Name: to.Ptr("Basic_A0"), //armcompute.VirtualMachineSizeTypesBasicA0 - Name: to.Ptr("Standard_A0"), //armcompute.VirtualMachineSizeTypesBasicA0 - Capacity: to.Ptr[int64](1), - }, - Properties: &armcompute.VirtualMachineScaleSetProperties{ - Overprovision: to.Ptr(false), - UpgradePolicy: &armcompute.UpgradePolicy{ - Mode: to.Ptr(armcompute.UpgradeModeManual), - AutomaticOSUpgradePolicy: &armcompute.AutomaticOSUpgradePolicy{ - EnableAutomaticOSUpgrade: to.Ptr(false), - 
DisableAutomaticRollback: to.Ptr(false), - }, - }, - VirtualMachineProfile: &armcompute.VirtualMachineScaleSetVMProfile{ - OSProfile: &armcompute.VirtualMachineScaleSetOSProfile{ - ComputerNamePrefix: to.Ptr("vmss"), - AdminUsername: to.Ptr("sample-user"), - AdminPassword: to.Ptr("Password01!@#"), - }, - StorageProfile: &armcompute.VirtualMachineScaleSetStorageProfile{ - ImageReference: &armcompute.ImageReference{ - Offer: to.Ptr("WindowsServer"), - Publisher: to.Ptr("MicrosoftWindowsServer"), - SKU: to.Ptr("2019-Datacenter"), - Version: to.Ptr("latest"), - }, - }, - NetworkProfile: &armcompute.VirtualMachineScaleSetNetworkProfile{ - NetworkInterfaceConfigurations: []*armcompute.VirtualMachineScaleSetNetworkConfiguration{ - { - Name: to.Ptr(vmssName), - Properties: &armcompute.VirtualMachineScaleSetNetworkConfigurationProperties{ - Primary: to.Ptr(true), - EnableIPForwarding: to.Ptr(true), - IPConfigurations: []*armcompute.VirtualMachineScaleSetIPConfiguration{ - { - Name: to.Ptr(vmssName), - Properties: &armcompute.VirtualMachineScaleSetIPConfigurationProperties{ - Subnet: &armcompute.APIEntityReference{ - ID: to.Ptr(*vnResp.Properties.Subnets[0].ID), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - nil, - ) - testsuite.Require().NoError(err) - vmssResp, err := testutil.PollForTest(testsuite.ctx, vmssPoller) - testsuite.Require().NoError(err) - testsuite.Require().Equal(vmssName, *vmssResp.Name) - - // update - fmt.Println("Call operation: VirtualMachineScaleSets_Update") - updatePollerResp, err := vmssClient.BeginUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vmssName, - armcompute.VirtualMachineScaleSetUpdate{ - Tags: map[string]*string{ - "test": to.Ptr("live"), - }, - }, - nil, - ) - testsuite.Require().NoError(err) - updateResp, err := testutil.PollForTest(testsuite.ctx, updatePollerResp) - testsuite.Require().NoError(err) - testsuite.Require().Equal("live", *updateResp.Tags["test"]) - - // get - fmt.Println("Call operation: 
VirtualMachineScaleSets_Get") - getResp, err := vmssClient.Get(testsuite.ctx, testsuite.resourceGroupName, vmssName, nil) - testsuite.Require().NoError(err) - testsuite.Require().Equal(vmssName, *getResp.Name) - - // list - fmt.Println("Call operation: VirtualMachineScaleSets_List") - listResp := vmssClient.NewListPager(testsuite.resourceGroupName, nil) - testsuite.Require().True(listResp.More()) - - // delete - fmt.Println("Call operation: VirtualMachineScaleSets_Delete") - delPoller, err := vmssClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, vmssName, nil) - testsuite.Require().NoError(err) - _, err = testutil.PollForTest(testsuite.ctx, delPoller) - testsuite.Require().NoError(err) -} From c005ed6159fdf2104077694b5f89e7063a0cc586 Mon Sep 17 00:00:00 2001 From: Peng Jiahui <46921893+Alancere@users.noreply.github.com> Date: Thu, 4 May 2023 15:13:35 +0800 Subject: [PATCH 17/50] sdk/resourcemanager/network/armnetwork live test (#20331) * sdk/resourcemanager/network/armnetwork live test * update subscriptionId default value * update recording --- ...iongatewaywafdynamicmanifests_live_test.go | 70 +++ .../applicationsecuritygroup_live_test.go | 114 +++++ .../network/armnetwork/assets.json | 2 +- .../availabledelegations_live_test.go | 81 +++ .../availableservicealiases_live_test.go | 79 +++ .../azurefirewallfqdntag_live_test.go | 70 +++ .../armnetwork/azurewebcategory_live_test.go | 70 +++ .../armnetwork/bastionhost_live_test.go | 183 +++++++ .../checkdnsavailability_live_test.go | 66 +++ .../armnetwork/customipprefix_live_test.go | 109 +++++ .../ddosprotectionplan_live_test.go | 114 +++++ .../armnetwork/endpointservice_live_test.go | 70 +++ .../expressroutecircuit_live_test.go | 251 ++++++++++ .../armnetwork/firewallpolicy_live_test.go | 176 +++++++ sdk/resourcemanager/network/armnetwork/go.mod | 26 +- sdk/resourcemanager/network/armnetwork/go.sum | 57 ++- .../armnetwork/ipgroups_client_live_test.go | 116 ----- 
.../network/armnetwork/ipgroups_live_test.go | 122 +++++ .../armnetwork/loadbalancer_live_test.go | 335 +++++++++++++ .../armnetwork/natgateway_live_test.go | 116 +++++ .../armnetwork/networkinterface_live_test.go | 185 +++++++ .../armnetwork/networkmanager_live_test.go | 169 +++++++ .../networkmanagerconnection_live_test.go | 121 +++++ ...agerconnectivityconfiguration_live_test.go | 145 ++++++ .../networkmanagergroup_live_test.go | 188 +++++++ ...gersecurityadminconfiguration_live_test.go | 252 ++++++++++ .../armnetwork/networkprofile_live_test.go | 161 ++++++ .../networksecuritygroup_live_test.go | 191 ++++++++ .../armnetwork/networkwatcher_live_test.go | 113 +++++ .../network/armnetwork/operation_live_test.go | 70 +++ .../armnetwork/publicipaddress_live_test.go | 113 +++++ .../armnetwork/publicipprefix_live_test.go | 119 +++++ .../armnetwork/routetable_live_test.go | 165 +++++++ .../armnetwork/servicecommunity_live_test.go | 70 +++ .../serviceendpointpolicy_live_test.go | 167 +++++++ .../armnetwork/servicetags_live_test.go | 79 +++ .../armnetwork/subnets_client_live_test.go | 122 ----- .../network/armnetwork/usage_live_test.go | 70 +++ .../armnetwork/virtualnetwork_live_test.go | 258 ++++++++++ .../virtualnetworkgateway_live_test.go | 237 +++++++++ .../virtualnetworks_client_live_test.go | 117 ----- .../armnetwork/virtualwan_live_test.go | 461 ++++++++++++++++++ .../webapplicationfirewall_live_test.go | 120 +++++ 43 files changed, 5525 insertions(+), 395 deletions(-) create mode 100644 sdk/resourcemanager/network/armnetwork/applicationgatewaywafdynamicmanifests_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/applicationsecuritygroup_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/availabledelegations_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/availableservicealiases_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/azurefirewallfqdntag_live_test.go create mode 100644 
sdk/resourcemanager/network/armnetwork/azurewebcategory_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/bastionhost_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/checkdnsavailability_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/customipprefix_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/ddosprotectionplan_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/endpointservice_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/expressroutecircuit_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/firewallpolicy_live_test.go delete mode 100644 sdk/resourcemanager/network/armnetwork/ipgroups_client_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/ipgroups_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/loadbalancer_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/natgateway_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/networkinterface_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/networkmanager_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/networkmanagerconnection_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/networkmanagerconnectivityconfiguration_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/networkmanagergroup_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/networkmanagersecurityadminconfiguration_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/networkprofile_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/networksecuritygroup_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/networkwatcher_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/operation_live_test.go create mode 100644 
sdk/resourcemanager/network/armnetwork/publicipaddress_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/publicipprefix_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/routetable_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/servicecommunity_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/serviceendpointpolicy_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/servicetags_live_test.go delete mode 100644 sdk/resourcemanager/network/armnetwork/subnets_client_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/usage_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/virtualnetwork_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/virtualnetworkgateway_live_test.go delete mode 100644 sdk/resourcemanager/network/armnetwork/virtualnetworks_client_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/virtualwan_live_test.go create mode 100644 sdk/resourcemanager/network/armnetwork/webapplicationfirewall_live_test.go diff --git a/sdk/resourcemanager/network/armnetwork/applicationgatewaywafdynamicmanifests_live_test.go b/sdk/resourcemanager/network/armnetwork/applicationgatewaywafdynamicmanifests_live_test.go new file mode 100644 index 000000000000..bff0b0ce73fc --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/applicationgatewaywafdynamicmanifests_live_test.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type ApplicationGatewayWafDynamicManifestsTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ApplicationGatewayWafDynamicManifestsTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *ApplicationGatewayWafDynamicManifestsTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestApplicationGatewayWafDynamicManifestsTestSuite(t *testing.T) { + suite.Run(t, new(ApplicationGatewayWafDynamicManifestsTestSuite)) +} + +// Microsoft.Network/locations/{location}/applicationGatewayWafDynamicManifests 
+func (testsuite *ApplicationGatewayWafDynamicManifestsTestSuite) TestApplicationGatewayWafDynamicManifests() { + var err error + // From step ApplicationGatewayWafDynamicManifests_Get + fmt.Println("Call operation: ApplicationGatewayWafDynamicManifests_Get") + applicationGatewayWafDynamicManifestsClient, err := armnetwork.NewApplicationGatewayWafDynamicManifestsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + applicationGatewayWafDynamicManifestsClientNewGetPager := applicationGatewayWafDynamicManifestsClient.NewGetPager(testsuite.location, nil) + for applicationGatewayWafDynamicManifestsClientNewGetPager.More() { + _, err := applicationGatewayWafDynamicManifestsClientNewGetPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/network/armnetwork/applicationsecuritygroup_live_test.go b/sdk/resourcemanager/network/armnetwork/applicationsecuritygroup_live_test.go new file mode 100644 index 000000000000..bc0277742adb --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/applicationsecuritygroup_live_test.go @@ -0,0 +1,114 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type ApplicationSecurityGroupTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + applicationSecurityGroupName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ApplicationSecurityGroupTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.applicationSecurityGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "applicatio", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *ApplicationSecurityGroupTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestApplicationSecurityGroupTestSuite(t *testing.T) { + 
suite.Run(t, new(ApplicationSecurityGroupTestSuite)) +} + +// Microsoft.Network/applicationSecurityGroups/{applicationSecurityGroupName} +func (testsuite *ApplicationSecurityGroupTestSuite) TestApplicationSecurityGroups() { + var err error + // From step ApplicationSecurityGroups_CreateOrUpdate + fmt.Println("Call operation: ApplicationSecurityGroups_CreateOrUpdate") + applicationSecurityGroupsClient, err := armnetwork.NewApplicationSecurityGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + applicationSecurityGroupsClientCreateOrUpdateResponsePoller, err := applicationSecurityGroupsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.applicationSecurityGroupName, armnetwork.ApplicationSecurityGroup{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.ApplicationSecurityGroupPropertiesFormat{}, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, applicationSecurityGroupsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step ApplicationSecurityGroups_ListAll + fmt.Println("Call operation: ApplicationSecurityGroups_ListAll") + applicationSecurityGroupsClientNewListAllPager := applicationSecurityGroupsClient.NewListAllPager(nil) + for applicationSecurityGroupsClientNewListAllPager.More() { + _, err := applicationSecurityGroupsClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ApplicationSecurityGroups_List + fmt.Println("Call operation: ApplicationSecurityGroups_List") + applicationSecurityGroupsClientNewListPager := applicationSecurityGroupsClient.NewListPager(testsuite.resourceGroupName, nil) + for applicationSecurityGroupsClientNewListPager.More() { + _, err := applicationSecurityGroupsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ApplicationSecurityGroups_Get + 
fmt.Println("Call operation: ApplicationSecurityGroups_Get") + _, err = applicationSecurityGroupsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.applicationSecurityGroupName, nil) + testsuite.Require().NoError(err) + + // From step ApplicationSecurityGroups_UpdateTags + fmt.Println("Call operation: ApplicationSecurityGroups_UpdateTags") + _, err = applicationSecurityGroupsClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.applicationSecurityGroupName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step ApplicationSecurityGroups_Delete + fmt.Println("Call operation: ApplicationSecurityGroups_Delete") + applicationSecurityGroupsClientDeleteResponsePoller, err := applicationSecurityGroupsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.applicationSecurityGroupName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, applicationSecurityGroupsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/assets.json b/sdk/resourcemanager/network/armnetwork/assets.json index 04dbda1a080f..12d5b800368f 100644 --- a/sdk/resourcemanager/network/armnetwork/assets.json +++ b/sdk/resourcemanager/network/armnetwork/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/network/armnetwork", - "Tag": "go/resourcemanager/network/armnetwork_98a17ae925" + "Tag": "go/resourcemanager/network/armnetwork_f214e85fe5" } diff --git a/sdk/resourcemanager/network/armnetwork/availabledelegations_live_test.go b/sdk/resourcemanager/network/armnetwork/availabledelegations_live_test.go new file mode 100644 index 000000000000..543b5129a2e2 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/availabledelegations_live_test.go @@ -0,0 +1,81 @@ 
+//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type AvailableDelegationsTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *AvailableDelegationsTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *AvailableDelegationsTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + 
testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestAvailableDelegationsTestSuite(t *testing.T) { + suite.Run(t, new(AvailableDelegationsTestSuite)) +} + +// Microsoft.Network/locations/{location}/availableDelegations +func (testsuite *AvailableDelegationsTestSuite) TestAvailableDelegations() { + var err error + // From step AvailableDelegations_List + fmt.Println("Call operation: AvailableDelegations_List") + availableDelegationsClient, err := armnetwork.NewAvailableDelegationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + availableDelegationsClientNewListPager := availableDelegationsClient.NewListPager(testsuite.location, nil) + for availableDelegationsClientNewListPager.More() { + _, err := availableDelegationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step AvailableResourceGroupDelegations_List + fmt.Println("Call operation: AvailableResourceGroupDelegations_List") + availableResourceGroupDelegationsClient, err := armnetwork.NewAvailableResourceGroupDelegationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + availableResourceGroupDelegationsClientNewListPager := availableResourceGroupDelegationsClient.NewListPager(testsuite.location, testsuite.resourceGroupName, nil) + for availableResourceGroupDelegationsClientNewListPager.More() { + _, err := availableResourceGroupDelegationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/network/armnetwork/availableservicealiases_live_test.go b/sdk/resourcemanager/network/armnetwork/availableservicealiases_live_test.go new file mode 100644 index 000000000000..c60243efbf90 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/availableservicealiases_live_test.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) 
Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type AvailableServiceAliasesTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *AvailableServiceAliasesTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *AvailableServiceAliasesTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + 
testutil.StopRecording(testsuite.T()) +} + +func TestAvailableServiceAliasesTestSuite(t *testing.T) { + suite.Run(t, new(AvailableServiceAliasesTestSuite)) +} + +// Microsoft.Network/locations/{location}/availableServiceAliases +func (testsuite *AvailableServiceAliasesTestSuite) TestAvailableServiceAliases() { + var err error + // From step AvailableServiceAliases_List + fmt.Println("Call operation: AvailableServiceAliases_List") + availableServiceAliasesClient, err := armnetwork.NewAvailableServiceAliasesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + availableServiceAliasesClientNewListPager := availableServiceAliasesClient.NewListPager(testsuite.location, nil) + for availableServiceAliasesClientNewListPager.More() { + _, err := availableServiceAliasesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step AvailableServiceAliases_ListByResourceGroup + fmt.Println("Call operation: AvailableServiceAliases_ListByResourceGroup") + availableServiceAliasesClientNewListByResourceGroupPager := availableServiceAliasesClient.NewListByResourceGroupPager(testsuite.resourceGroupName, testsuite.location, nil) + for availableServiceAliasesClientNewListByResourceGroupPager.More() { + _, err := availableServiceAliasesClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/network/armnetwork/azurefirewallfqdntag_live_test.go b/sdk/resourcemanager/network/armnetwork/azurefirewallfqdntag_live_test.go new file mode 100644 index 000000000000..ddda8e626302 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/azurefirewallfqdntag_live_test.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type AzureFirewallFqdnTagTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *AzureFirewallFqdnTagTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *AzureFirewallFqdnTagTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestAzureFirewallFqdnTagTestSuite(t *testing.T) { + suite.Run(t, new(AzureFirewallFqdnTagTestSuite)) +} + +// 
Microsoft.Network/azureFirewallFqdnTags +func (testsuite *AzureFirewallFqdnTagTestSuite) TestAzureFirewallFqdnTags() { + var err error + // From step AzureFirewallFqdnTags_ListAll + fmt.Println("Call operation: AzureFirewallFqdnTags_ListAll") + azureFirewallFqdnTagsClient, err := armnetwork.NewAzureFirewallFqdnTagsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + azureFirewallFqdnTagsClientNewListAllPager := azureFirewallFqdnTagsClient.NewListAllPager(nil) + for azureFirewallFqdnTagsClientNewListAllPager.More() { + _, err := azureFirewallFqdnTagsClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/network/armnetwork/azurewebcategory_live_test.go b/sdk/resourcemanager/network/armnetwork/azurewebcategory_live_test.go new file mode 100644 index 000000000000..7de8e5fb8cb4 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/azurewebcategory_live_test.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type AzureWebCategoryTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *AzureWebCategoryTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *AzureWebCategoryTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestAzureWebCategoryTestSuite(t *testing.T) { + suite.Run(t, new(AzureWebCategoryTestSuite)) +} + +// Microsoft.Network/azureWebCategories +func (testsuite *AzureWebCategoryTestSuite) TestWebCategories() { + var err error + // From step WebCategories_ListBySubscription + 
fmt.Println("Call operation: WebCategories_ListBySubscription") + webCategoriesClient, err := armnetwork.NewWebCategoriesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + webCategoriesClientNewListBySubscriptionPager := webCategoriesClient.NewListBySubscriptionPager(nil) + for webCategoriesClientNewListBySubscriptionPager.More() { + _, err := webCategoriesClientNewListBySubscriptionPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/network/armnetwork/bastionhost_live_test.go b/sdk/resourcemanager/network/armnetwork/bastionhost_live_test.go new file mode 100644 index 000000000000..871dee50f565 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/bastionhost_live_test.go @@ -0,0 +1,183 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type BastionHostTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + bastionHostName string + publicIpAddressId string + subnetId string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *BastionHostTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.bastionHostName = testutil.GenerateAlphaNumericID(testsuite.T(), "bastionhos", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *BastionHostTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestBastionHostTestSuite(t *testing.T) { + suite.Run(t, 
new(BastionHostTestSuite)) +} + +func (testsuite *BastionHostTestSuite) Prepare() { + var err error + // From step VirtualNetworks_CreateOrUpdate + fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate") + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworksClientCreateOrUpdateResponsePoller, err := virtualNetworksClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, "test-vnet", armnetwork.VirtualNetwork{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("10.0.0.0/16")}, + }, + Subnets: []*armnetwork.Subnet{ + { + Name: to.Ptr("AzureBastionSubnet"), + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr("10.0.0.0/24"), + }, + }}, + }, + }, nil) + testsuite.Require().NoError(err) + var virtualNetworksClientCreateOrUpdateResponse *armnetwork.VirtualNetworksClientCreateOrUpdateResponse + virtualNetworksClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, virtualNetworksClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.subnetId = *virtualNetworksClientCreateOrUpdateResponse.Properties.Subnets[0].ID + + // From step PublicIPAddresses_CreateOrUpdate + fmt.Println("Call operation: PublicIPAddresses_CreateOrUpdate") + publicIPAddressesClient, err := armnetwork.NewPublicIPAddressesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + publicIPAddressesClientCreateOrUpdateResponsePoller, err := publicIPAddressesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, "test-ip", armnetwork.PublicIPAddress{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.PublicIPAddressPropertiesFormat{ + IdleTimeoutInMinutes: to.Ptr[int32](10), + 
PublicIPAddressVersion: to.Ptr(armnetwork.IPVersionIPv4), + PublicIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodStatic), + }, + SKU: &armnetwork.PublicIPAddressSKU{ + Name: to.Ptr(armnetwork.PublicIPAddressSKUNameStandard), + Tier: to.Ptr(armnetwork.PublicIPAddressSKUTierRegional), + }, + }, nil) + testsuite.Require().NoError(err) + var publicIPAddressesClientCreateOrUpdateResponse *armnetwork.PublicIPAddressesClientCreateOrUpdateResponse + publicIPAddressesClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, publicIPAddressesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.publicIpAddressId = *publicIPAddressesClientCreateOrUpdateResponse.ID +} + +// Microsoft.Network/bastionHosts/{bastionHostName} +func (testsuite *BastionHostTestSuite) TestBastionHosts() { + var err error + // From step BastionHosts_CreateOrUpdate + fmt.Println("Call operation: BastionHosts_CreateOrUpdate") + bastionHostsClient, err := armnetwork.NewBastionHostsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + bastionHostsClientCreateOrUpdateResponsePoller, err := bastionHostsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.bastionHostName, armnetwork.BastionHost{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.BastionHostPropertiesFormat{ + IPConfigurations: []*armnetwork.BastionHostIPConfiguration{ + { + Name: to.Ptr("bastionHostIpConfiguration"), + Properties: &armnetwork.BastionHostIPConfigurationPropertiesFormat{ + PublicIPAddress: &armnetwork.SubResource{ + ID: to.Ptr(testsuite.publicIpAddressId), + }, + Subnet: &armnetwork.SubResource{ + ID: to.Ptr(testsuite.subnetId), + }, + }, + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, bastionHostsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step BastionHosts_List + fmt.Println("Call 
operation: BastionHosts_List") + bastionHostsClientNewListPager := bastionHostsClient.NewListPager(nil) + for bastionHostsClientNewListPager.More() { + _, err := bastionHostsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step BastionHosts_ListByResourceGroup + fmt.Println("Call operation: BastionHosts_ListByResourceGroup") + bastionHostsClientNewListByResourceGroupPager := bastionHostsClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for bastionHostsClientNewListByResourceGroupPager.More() { + _, err := bastionHostsClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step BastionHosts_Get + fmt.Println("Call operation: BastionHosts_Get") + _, err = bastionHostsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.bastionHostName, nil) + testsuite.Require().NoError(err) + + // From step BastionHosts_UpdateTags + fmt.Println("Call operation: BastionHosts_UpdateTags") + bastionHostsClientUpdateTagsResponsePoller, err := bastionHostsClient.BeginUpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.bastionHostName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, bastionHostsClientUpdateTagsResponsePoller) + testsuite.Require().NoError(err) + + // From step BastionHosts_Delete + fmt.Println("Call operation: BastionHosts_Delete") + bastionHostsClientDeleteResponsePoller, err := bastionHostsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.bastionHostName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, bastionHostsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/checkdnsavailability_live_test.go 
b/sdk/resourcemanager/network/armnetwork/checkdnsavailability_live_test.go new file mode 100644 index 000000000000..ecb3f671014b --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/checkdnsavailability_live_test.go @@ -0,0 +1,66 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type CheckDnsAvailabilityTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *CheckDnsAvailabilityTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = 
*resourceGroup.Name +} + +func (testsuite *CheckDnsAvailabilityTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestCheckDnsAvailabilityTestSuite(t *testing.T) { + suite.Run(t, new(CheckDnsAvailabilityTestSuite)) +} + +// Microsoft.Network/locations/{location}/CheckDnsNameAvailability +func (testsuite *CheckDnsAvailabilityTestSuite) TestCheckDnsNameAvailability() { + var err error + // From step CheckDnsNameAvailability + fmt.Println("Call operation: NetworkManagementClient_CheckDNSNameAvailability") + managementClient, err := armnetwork.NewManagementClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = managementClient.CheckDNSNameAvailability(testsuite.ctx, testsuite.location, "testdns", nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/customipprefix_live_test.go b/sdk/resourcemanager/network/armnetwork/customipprefix_live_test.go new file mode 100644 index 000000000000..4471382ba422 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/customipprefix_live_test.go @@ -0,0 +1,109 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type CustomIpPrefixTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + customIpPrefixName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *CustomIpPrefixTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.customIpPrefixName = testutil.GenerateAlphaNumericID(testsuite.T(), "customippr", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *CustomIpPrefixTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestCustomIpPrefixTestSuite(t *testing.T) { + suite.Run(t, new(CustomIpPrefixTestSuite)) +} + +// 
Microsoft.Network/customIpPrefixes/{customIpPrefixName} +func (testsuite *CustomIpPrefixTestSuite) TestCustomIpPrefixes() { + var err error + // From step CustomIPPrefixes_CreateOrUpdate + fmt.Println("Call operation: CustomIPPrefixes_CreateOrUpdate") + customIPPrefixesClient, err := armnetwork.NewCustomIPPrefixesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + customIPPrefixesClientCreateOrUpdateResponsePoller, err := customIPPrefixesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.customIpPrefixName, armnetwork.CustomIPPrefix{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.CustomIPPrefixPropertiesFormat{ + Cidr: to.Ptr("0.0.0.0/24"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, customIPPrefixesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step CustomIPPrefixes_ListAll + fmt.Println("Call operation: CustomIPPrefixes_ListAll") + customIPPrefixesClientNewListAllPager := customIPPrefixesClient.NewListAllPager(nil) + for customIPPrefixesClientNewListAllPager.More() { + _, err := customIPPrefixesClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step CustomIPPrefixes_List + fmt.Println("Call operation: CustomIPPrefixes_List") + customIPPrefixesClientNewListPager := customIPPrefixesClient.NewListPager(testsuite.resourceGroupName, nil) + for customIPPrefixesClientNewListPager.More() { + _, err := customIPPrefixesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step CustomIPPrefixes_Get + fmt.Println("Call operation: CustomIPPrefixes_Get") + _, err = customIPPrefixesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.customIpPrefixName, &armnetwork.CustomIPPrefixesClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step 
CustomIPPrefixes_UpdateTags + fmt.Println("Call operation: CustomIPPrefixes_UpdateTags") + _, err = customIPPrefixesClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.customIpPrefixName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/ddosprotectionplan_live_test.go b/sdk/resourcemanager/network/armnetwork/ddosprotectionplan_live_test.go new file mode 100644 index 000000000000..03e8f5fe50f7 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/ddosprotectionplan_live_test.go @@ -0,0 +1,114 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type DdosProtectionPlanTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + ddosProtectionPlanName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *DdosProtectionPlanTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.ddosProtectionPlanName = testutil.GenerateAlphaNumericID(testsuite.T(), "ddosprotec", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *DdosProtectionPlanTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestDdosProtectionPlanTestSuite(t *testing.T) { + suite.Run(t, 
new(DdosProtectionPlanTestSuite)) +} + +// Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName} +func (testsuite *DdosProtectionPlanTestSuite) TestDdosProtectionPlans() { + var err error + // From step DdosProtectionPlans_CreateOrUpdate + fmt.Println("Call operation: DdosProtectionPlans_CreateOrUpdate") + ddosProtectionPlansClient, err := armnetwork.NewDdosProtectionPlansClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + ddosProtectionPlansClientCreateOrUpdateResponsePoller, err := ddosProtectionPlansClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.ddosProtectionPlanName, armnetwork.DdosProtectionPlan{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.DdosProtectionPlanPropertiesFormat{}, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, ddosProtectionPlansClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step DdosProtectionPlans_List + fmt.Println("Call operation: DdosProtectionPlans_List") + ddosProtectionPlansClientNewListPager := ddosProtectionPlansClient.NewListPager(nil) + for ddosProtectionPlansClientNewListPager.More() { + _, err := ddosProtectionPlansClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DdosProtectionPlans_ListByResourceGroup + fmt.Println("Call operation: DdosProtectionPlans_ListByResourceGroup") + ddosProtectionPlansClientNewListByResourceGroupPager := ddosProtectionPlansClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for ddosProtectionPlansClientNewListByResourceGroupPager.More() { + _, err := ddosProtectionPlansClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DdosProtectionPlans_Get + fmt.Println("Call operation: DdosProtectionPlans_Get") + _, err = ddosProtectionPlansClient.Get(testsuite.ctx, 
testsuite.resourceGroupName, testsuite.ddosProtectionPlanName, nil) + testsuite.Require().NoError(err) + + // From step DdosProtectionPlans_UpdateTags + fmt.Println("Call operation: DdosProtectionPlans_UpdateTags") + _, err = ddosProtectionPlansClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.ddosProtectionPlanName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step DdosProtectionPlans_Delete + fmt.Println("Call operation: DdosProtectionPlans_Delete") + ddosProtectionPlansClientDeleteResponsePoller, err := ddosProtectionPlansClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.ddosProtectionPlanName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, ddosProtectionPlansClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/endpointservice_live_test.go b/sdk/resourcemanager/network/armnetwork/endpointservice_live_test.go new file mode 100644 index 000000000000..a59f57456946 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/endpointservice_live_test.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type EndpointServiceTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *EndpointServiceTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *EndpointServiceTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestEndpointServiceTestSuite(t *testing.T) { + suite.Run(t, new(EndpointServiceTestSuite)) +} + +// Microsoft.Network/locations/{location}/virtualNetworkAvailableEndpointServices +func (testsuite *EndpointServiceTestSuite) TestAvailableEndpointServices() { + var err error + // From step 
AvailableEndpointServices_List + fmt.Println("Call operation: AvailableEndpointServices_List") + availableEndpointServicesClient, err := armnetwork.NewAvailableEndpointServicesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + availableEndpointServicesClientNewListPager := availableEndpointServicesClient.NewListPager(testsuite.location, nil) + for availableEndpointServicesClientNewListPager.More() { + _, err := availableEndpointServicesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/network/armnetwork/expressroutecircuit_live_test.go b/sdk/resourcemanager/network/armnetwork/expressroutecircuit_live_test.go new file mode 100644 index 000000000000..f9862165100c --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/expressroutecircuit_live_test.go @@ -0,0 +1,251 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type ExpressRouteCircuitTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + circuitName string + connectionName string + peeringName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ExpressRouteCircuitTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.circuitName = testutil.GenerateAlphaNumericID(testsuite.T(), "circuitnam", 6) + testsuite.connectionName = testutil.GenerateAlphaNumericID(testsuite.T(), "connerc", 6) + testsuite.peeringName = "AzurePrivatePeering" + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *ExpressRouteCircuitTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, 
testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestExpressRouteCircuitTestSuite(t *testing.T) { + suite.Run(t, new(ExpressRouteCircuitTestSuite)) +} + +func (testsuite *ExpressRouteCircuitTestSuite) Prepare() { + var err error + // From step ExpressRouteCircuits_CreateOrUpdate + fmt.Println("Call operation: ExpressRouteCircuits_CreateOrUpdate") + expressRouteCircuitsClient, err := armnetwork.NewExpressRouteCircuitsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + expressRouteCircuitsClientCreateOrUpdateResponsePoller, err := expressRouteCircuitsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, armnetwork.ExpressRouteCircuit{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.ExpressRouteCircuitPropertiesFormat{ + AllowClassicOperations: to.Ptr(false), + Authorizations: []*armnetwork.ExpressRouteCircuitAuthorization{}, + Peerings: []*armnetwork.ExpressRouteCircuitPeering{}, + ServiceProviderProperties: &armnetwork.ExpressRouteCircuitServiceProviderProperties{ + BandwidthInMbps: to.Ptr[int32](200), + PeeringLocation: to.Ptr("Silicon Valley"), + ServiceProviderName: to.Ptr("Equinix"), + }, + }, + SKU: &armnetwork.ExpressRouteCircuitSKU{ + Name: to.Ptr("Standard_MeteredData"), + Family: to.Ptr(armnetwork.ExpressRouteCircuitSKUFamilyMeteredData), + Tier: to.Ptr(armnetwork.ExpressRouteCircuitSKUTierStandard), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, expressRouteCircuitsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step ExpressRouteCircuitPeerings_CreateOrUpdate + fmt.Println("Call operation: ExpressRouteCircuitPeerings_CreateOrUpdate") + expressRouteCircuitPeeringsClient, err := armnetwork.NewExpressRouteCircuitPeeringsClient(testsuite.subscriptionId, testsuite.cred, 
testsuite.options) + testsuite.Require().NoError(err) + expressRouteCircuitPeeringsClientCreateOrUpdateResponsePoller, err := expressRouteCircuitPeeringsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, testsuite.peeringName, armnetwork.ExpressRouteCircuitPeering{ + Properties: &armnetwork.ExpressRouteCircuitPeeringPropertiesFormat{ + PeerASN: to.Ptr[int64](200), + PrimaryPeerAddressPrefix: to.Ptr("192.168.16.252/30"), + SecondaryPeerAddressPrefix: to.Ptr("192.168.18.252/30"), + VlanID: to.Ptr[int32](200), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, expressRouteCircuitPeeringsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/expressRouteCircuits/{circuitName} +func (testsuite *ExpressRouteCircuitTestSuite) TestExpressRouteCircuits() { + var err error + // From step ExpressRouteCircuits_ListAll + fmt.Println("Call operation: ExpressRouteCircuits_ListAll") + expressRouteCircuitsClient, err := armnetwork.NewExpressRouteCircuitsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + expressRouteCircuitsClientNewListAllPager := expressRouteCircuitsClient.NewListAllPager(nil) + for expressRouteCircuitsClientNewListAllPager.More() { + _, err := expressRouteCircuitsClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ExpressRouteCircuits_List + fmt.Println("Call operation: ExpressRouteCircuits_List") + expressRouteCircuitsClientNewListPager := expressRouteCircuitsClient.NewListPager(testsuite.resourceGroupName, nil) + for expressRouteCircuitsClientNewListPager.More() { + _, err := expressRouteCircuitsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ExpressRouteCircuits_Get + fmt.Println("Call operation: ExpressRouteCircuits_Get") + _, err = 
expressRouteCircuitsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, nil) + testsuite.Require().NoError(err) + + // From step ExpressRouteCircuits_GetStats + fmt.Println("Call operation: ExpressRouteCircuits_GetStats") + _, err = expressRouteCircuitsClient.GetStats(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, nil) + testsuite.Require().NoError(err) + + // From step ExpressRouteCircuits_UpdateTags + fmt.Println("Call operation: ExpressRouteCircuits_UpdateTags") + _, err = expressRouteCircuitsClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step ExpressRouteCircuits_GetPeeringStats + fmt.Println("Call operation: ExpressRouteCircuits_GetPeeringStats") + _, err = expressRouteCircuitsClient.GetPeeringStats(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, testsuite.peeringName, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName} +func (testsuite *ExpressRouteCircuitTestSuite) TestExpressRouteCircuitPeerings() { + var err error + // From step ExpressRouteCircuitPeerings_List + fmt.Println("Call operation: ExpressRouteCircuitPeerings_List") + expressRouteCircuitPeeringsClient, err := armnetwork.NewExpressRouteCircuitPeeringsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + expressRouteCircuitPeeringsClientNewListPager := expressRouteCircuitPeeringsClient.NewListPager(testsuite.resourceGroupName, testsuite.circuitName, nil) + for expressRouteCircuitPeeringsClientNewListPager.More() { + _, err := expressRouteCircuitPeeringsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ExpressRouteCircuitPeerings_Get + fmt.Println("Call 
operation: ExpressRouteCircuitPeerings_Get") + _, err = expressRouteCircuitPeeringsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, testsuite.peeringName, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName} +func (testsuite *ExpressRouteCircuitTestSuite) TestExpressRouteCircuitAuthorizations() { + authorizationName := "ercauthorization" + var err error + // From step ExpressRouteCircuitAuthorizations_CreateOrUpdate + fmt.Println("Call operation: ExpressRouteCircuitAuthorizations_CreateOrUpdate") + expressRouteCircuitAuthorizationsClient, err := armnetwork.NewExpressRouteCircuitAuthorizationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + expressRouteCircuitAuthorizationsClientCreateOrUpdateResponsePoller, err := expressRouteCircuitAuthorizationsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, authorizationName, armnetwork.ExpressRouteCircuitAuthorization{ + Properties: &armnetwork.AuthorizationPropertiesFormat{}, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, expressRouteCircuitAuthorizationsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step ExpressRouteCircuitAuthorizations_List + fmt.Println("Call operation: ExpressRouteCircuitAuthorizations_List") + expressRouteCircuitAuthorizationsClientNewListPager := expressRouteCircuitAuthorizationsClient.NewListPager(testsuite.resourceGroupName, testsuite.circuitName, nil) + for expressRouteCircuitAuthorizationsClientNewListPager.More() { + _, err := expressRouteCircuitAuthorizationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ExpressRouteCircuitAuthorizations_Get + fmt.Println("Call operation: ExpressRouteCircuitAuthorizations_Get") + _, err = 
expressRouteCircuitAuthorizationsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, authorizationName, nil) + testsuite.Require().NoError(err) + + // From step ExpressRouteCircuitAuthorizations_Delete + fmt.Println("Call operation: ExpressRouteCircuitAuthorizations_Delete") + expressRouteCircuitAuthorizationsClientDeleteResponsePoller, err := expressRouteCircuitAuthorizationsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, authorizationName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, expressRouteCircuitAuthorizationsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/expressRouteServiceProviders +func (testsuite *ExpressRouteCircuitTestSuite) TestExpressRouteServiceProviders() { + var err error + // From step ExpressRouteServiceProviders_List + fmt.Println("Call operation: ExpressRouteServiceProviders_List") + expressRouteServiceProvidersClient, err := armnetwork.NewExpressRouteServiceProvidersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + expressRouteServiceProvidersClientNewListPager := expressRouteServiceProvidersClient.NewListPager(nil) + for expressRouteServiceProvidersClientNewListPager.More() { + _, err := expressRouteServiceProvidersClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} + +func (testsuite *ExpressRouteCircuitTestSuite) Cleanup() { + var err error + // From step ExpressRouteCircuitPeerings_Delete + fmt.Println("Call operation: ExpressRouteCircuitPeerings_Delete") + expressRouteCircuitPeeringsClient, err := armnetwork.NewExpressRouteCircuitPeeringsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + expressRouteCircuitPeeringsClientDeleteResponsePoller, err := expressRouteCircuitPeeringsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, 
testsuite.circuitName, testsuite.peeringName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, expressRouteCircuitPeeringsClientDeleteResponsePoller) + testsuite.Require().NoError(err) + + // From step ExpressRouteCircuits_Delete + fmt.Println("Call operation: ExpressRouteCircuits_Delete") + expressRouteCircuitsClient, err := armnetwork.NewExpressRouteCircuitsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + expressRouteCircuitsClientDeleteResponsePoller, err := expressRouteCircuitsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.circuitName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, expressRouteCircuitsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/firewallpolicy_live_test.go b/sdk/resourcemanager/network/armnetwork/firewallpolicy_live_test.go new file mode 100644 index 000000000000..da077461f42c --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/firewallpolicy_live_test.go @@ -0,0 +1,176 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type FirewallPolicyTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + firewallPolicyName string + ruleCollectionGroupName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *FirewallPolicyTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.firewallPolicyName = testutil.GenerateAlphaNumericID(testsuite.T(), "firewallpo", 6) + testsuite.ruleCollectionGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "rulecollec", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *FirewallPolicyTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + 
testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestFirewallPolicyTestSuite(t *testing.T) { + suite.Run(t, new(FirewallPolicyTestSuite)) +} + +func (testsuite *FirewallPolicyTestSuite) Prepare() { + var err error + // From step FirewallPolicies_CreateOrUpdate + fmt.Println("Call operation: FirewallPolicies_CreateOrUpdate") + firewallPoliciesClient, err := armnetwork.NewFirewallPoliciesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + firewallPoliciesClientCreateOrUpdateResponsePoller, err := firewallPoliciesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.firewallPolicyName, armnetwork.FirewallPolicy{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.FirewallPolicyPropertiesFormat{ + SKU: &armnetwork.FirewallPolicySKU{ + Tier: to.Ptr(armnetwork.FirewallPolicySKUTierPremium), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, firewallPoliciesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/firewallPolicies/{firewallPolicyName} +func (testsuite *FirewallPolicyTestSuite) TestFirewallPolicies() { + var err error + // From step FirewallPolicies_ListAll + fmt.Println("Call operation: FirewallPolicies_ListAll") + firewallPoliciesClient, err := armnetwork.NewFirewallPoliciesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + firewallPoliciesClientNewListAllPager := firewallPoliciesClient.NewListAllPager(nil) + for firewallPoliciesClientNewListAllPager.More() { + _, err := firewallPoliciesClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step FirewallPolicies_List + fmt.Println("Call operation: FirewallPolicies_List") + firewallPoliciesClientNewListPager := firewallPoliciesClient.NewListPager(testsuite.resourceGroupName, nil) + for 
firewallPoliciesClientNewListPager.More() { + _, err := firewallPoliciesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step FirewallPolicies_Get + fmt.Println("Call operation: FirewallPolicies_Get") + _, err = firewallPoliciesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.firewallPolicyName, &armnetwork.FirewallPoliciesClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step FirewallPolicies_UpdateTags + fmt.Println("Call operation: FirewallPolicies_UpdateTags") + _, err = firewallPoliciesClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.firewallPolicyName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "key1": to.Ptr("value1"), + "key2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/firewallPolicies/{firewallPolicyName}/ruleCollectionGroups/{ruleCollectionGroupName} +func (testsuite *FirewallPolicyTestSuite) TestFirewallPolicyRuleCollectionGroups() { + var err error + // From step FirewallPolicyRuleCollectionGroups_CreateOrUpdate + fmt.Println("Call operation: FirewallPolicyRuleCollectionGroups_CreateOrUpdate") + firewallPolicyRuleCollectionGroupsClient, err := armnetwork.NewFirewallPolicyRuleCollectionGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + firewallPolicyRuleCollectionGroupsClientCreateOrUpdateResponsePoller, err := firewallPolicyRuleCollectionGroupsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.firewallPolicyName, testsuite.ruleCollectionGroupName, armnetwork.FirewallPolicyRuleCollectionGroup{ + Properties: &armnetwork.FirewallPolicyRuleCollectionGroupProperties{ + Priority: to.Ptr[int32](100), + RuleCollections: []armnetwork.FirewallPolicyRuleCollectionClassification{ + &armnetwork.FirewallPolicyFilterRuleCollection{ + Name: to.Ptr("Example-Filter-Rule-Collection"), + Priority: 
to.Ptr[int32](100), + RuleCollectionType: to.Ptr(armnetwork.FirewallPolicyRuleCollectionTypeFirewallPolicyFilterRuleCollection), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, firewallPolicyRuleCollectionGroupsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step FirewallPolicyRuleCollectionGroups_List + fmt.Println("Call operation: FirewallPolicyRuleCollectionGroups_List") + firewallPolicyRuleCollectionGroupsClientNewListPager := firewallPolicyRuleCollectionGroupsClient.NewListPager(testsuite.resourceGroupName, testsuite.firewallPolicyName, nil) + for firewallPolicyRuleCollectionGroupsClientNewListPager.More() { + _, err := firewallPolicyRuleCollectionGroupsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step FirewallPolicyRuleCollectionGroups_Get + fmt.Println("Call operation: FirewallPolicyRuleCollectionGroups_Get") + _, err = firewallPolicyRuleCollectionGroupsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.firewallPolicyName, testsuite.ruleCollectionGroupName, nil) + testsuite.Require().NoError(err) + + // From step FirewallPolicyRuleCollectionGroups_Delete + fmt.Println("Call operation: FirewallPolicyRuleCollectionGroups_Delete") + firewallPolicyRuleCollectionGroupsClientDeleteResponsePoller, err := firewallPolicyRuleCollectionGroupsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.firewallPolicyName, testsuite.ruleCollectionGroupName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, firewallPolicyRuleCollectionGroupsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *FirewallPolicyTestSuite) Cleanup() { + var err error + // From step FirewallPolicies_Delete + fmt.Println("Call operation: FirewallPolicies_Delete") + firewallPoliciesClient, err := armnetwork.NewFirewallPoliciesClient(testsuite.subscriptionId, 
testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + firewallPoliciesClientDeleteResponsePoller, err := firewallPoliciesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.firewallPolicyName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, firewallPoliciesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/go.mod b/sdk/resourcemanager/network/armnetwork/go.mod index 2c652bfc25eb..a2b1e1571462 100644 --- a/sdk/resourcemanager/network/armnetwork/go.mod +++ b/sdk/resourcemanager/network/armnetwork/go.mod @@ -3,27 +3,27 @@ module github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/ go 1.18 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.2 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dnaeon/go-vcr v1.1.0 // indirect - github.com/golang-jwt/jwt/v4 v4.4.2 // indirect - github.com/google/uuid v1.1.1 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/google/uuid v1.3.0 // indirect 
github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/crypto v0.6.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sdk/resourcemanager/network/armnetwork/go.sum b/sdk/resourcemanager/network/armnetwork/go.sum index b0f97586a165..6befdf11bc6e 100644 --- a/sdk/resourcemanager/network/armnetwork/go.sum +++ b/sdk/resourcemanager/network/armnetwork/go.sum @@ -1,47 +1,52 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0 h1:xGLAFFd9D3iLGxYiUGPdITSzsFmU1K8VtfuUHWAoN7M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity 
v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0 h1:ECsQtyERDVz3NP3kvDOTLvbQhqWp/x9EsGKtb4ogUr8= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.0.0/go.mod h1:s1tW/At+xHqjNFvWU4G0c0Qv33KOhvbGNj0RCTQDV8s= -github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM= -github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= -github.com/golang-jwt/jwt/v4 v4.4.2 
h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/crypto 
v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/resourcemanager/network/armnetwork/ipgroups_client_live_test.go b/sdk/resourcemanager/network/armnetwork/ipgroups_client_live_test.go deleted file mode 100644 index 8de9a0c68bea..000000000000 --- a/sdk/resourcemanager/network/armnetwork/ipgroups_client_live_test.go +++ /dev/null @@ -1,116 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package armnetwork_test - -import ( - "context" - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" - "github.com/stretchr/testify/suite" -) - -type IPGroupsClientTestSuite struct { - suite.Suite - - ctx context.Context - cred azcore.TokenCredential - options *arm.ClientOptions - location string - resourceGroupName string - subscriptionID string -} - -func (testsuite *IPGroupsClientTestSuite) SetupSuite() { - testsuite.ctx = context.Background() - testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) - testsuite.location = testutil.GetEnv("LOCATION", "eastus") - testsuite.subscriptionID = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") - resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.location) - testsuite.Require().NoError(err) - testsuite.resourceGroupName = *resourceGroup.Name -} - -func (testsuite *IPGroupsClientTestSuite) TearDownSuite() { - _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.resourceGroupName) - testsuite.Require().NoError(err) - testutil.StopRecording(testsuite.T()) -} - -func TestIPGroupsClient(t *testing.T) { - suite.Run(t, new(IPGroupsClientTestSuite)) -} - -func (testsuite *IPGroupsClientTestSuite) TestIPGroupsCRUD() { - // create ip group - fmt.Println("Call operation: IpGroups_CreateOrUpdate") - ipgClient, err := armnetwork.NewIPGroupsClient(testsuite.subscriptionID, testsuite.cred, 
testsuite.options) - testsuite.Require().NoError(err) - ipgName := "go-test-ipg" - ipgPoller, err := ipgClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - ipgName, - armnetwork.IPGroup{ - Location: to.Ptr(testsuite.location), - Properties: &armnetwork.IPGroupPropertiesFormat{ - IPAddresses: []*string{ - to.Ptr("13.64.39.16/32"), - to.Ptr("40.74.146.80/31"), - to.Ptr("40.74.147.32/28"), - }, - }, - }, - nil, - ) - testsuite.Require().NoError(err) - resp, err := testutil.PollForTest(testsuite.ctx, ipgPoller) - testsuite.Require().NoError(err) - testsuite.Require().Equal(ipgName, *resp.Name) - - // update - fmt.Println("Call operation: IpGroups_UpdateGroups") - updateResp, err := ipgClient.UpdateGroups( - testsuite.ctx, - testsuite.resourceGroupName, - ipgName, - armnetwork.TagsObject{ - Tags: map[string]*string{ - "test": to.Ptr("live"), - }, - }, - nil, - ) - testsuite.Require().NoError(err) - testsuite.Require().Equal("live", *updateResp.Tags["test"]) - - // get ip group - fmt.Println("Call operation: IpGroups_Get") - getResp, err := ipgClient.Get(testsuite.ctx, testsuite.resourceGroupName, ipgName, nil) - testsuite.Require().NoError(err) - testsuite.Require().Equal(ipgName, *getResp.Name) - - // list ip group - fmt.Println("Call operation: IpGroups_List") - listPager := ipgClient.NewListPager(nil) - testsuite.Require().True(listPager.More()) - - // delete ip group - fmt.Println("Call operation: IpGroups_Delete") - delPoller, err := ipgClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, ipgName, nil) - testsuite.Require().NoError(err) - delResp, err := testutil.PollForTest(testsuite.ctx, delPoller) - testsuite.Require().NoError(err) - //testsuite.Require().Equal(200, delResp.RawResponse.StatusCode) - _ = delResp -} diff --git a/sdk/resourcemanager/network/armnetwork/ipgroups_live_test.go b/sdk/resourcemanager/network/armnetwork/ipgroups_live_test.go new file mode 100644 index 000000000000..dfbe0b8e9287 --- /dev/null +++ 
b/sdk/resourcemanager/network/armnetwork/ipgroups_live_test.go @@ -0,0 +1,122 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type IpGroupsTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + ipGroupsName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *IpGroupsTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.ipGroupsName = testutil.GenerateAlphaNumericID(testsuite.T(), "ipgroupsna", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + 
+func (testsuite *IpGroupsTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestIpGroupsTestSuite(t *testing.T) { + suite.Run(t, new(IpGroupsTestSuite)) +} + +// Microsoft.Network/ipGroups/{ipGroupsName} +func (testsuite *IpGroupsTestSuite) TestIpGroups() { + var err error + // From step IpGroups_CreateOrUpdate + fmt.Println("Call operation: IPGroups_CreateOrUpdate") + iPGroupsClient, err := armnetwork.NewIPGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + iPGroupsClientCreateOrUpdateResponsePoller, err := iPGroupsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.ipGroupsName, armnetwork.IPGroup{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{ + "key1": to.Ptr("value1"), + }, + Properties: &armnetwork.IPGroupPropertiesFormat{ + IPAddresses: []*string{ + to.Ptr("13.64.39.16/32"), + to.Ptr("40.74.146.80/31"), + to.Ptr("40.74.147.32/28")}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, iPGroupsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step IpGroups_List + fmt.Println("Call operation: IPGroups_List") + iPGroupsClientNewListPager := iPGroupsClient.NewListPager(nil) + for iPGroupsClientNewListPager.More() { + _, err := iPGroupsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step IpGroups_ListByResourceGroup + fmt.Println("Call operation: IPGroups_ListByResourceGroup") + iPGroupsClientNewListByResourceGroupPager := iPGroupsClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for iPGroupsClientNewListByResourceGroupPager.More() { + _, err := 
iPGroupsClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step IpGroups_Get + fmt.Println("Call operation: IPGroups_Get") + _, err = iPGroupsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.ipGroupsName, &armnetwork.IPGroupsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step IpGroups_UpdateGroups + fmt.Println("Call operation: IPGroups_UpdateGroups") + _, err = iPGroupsClient.UpdateGroups(testsuite.ctx, testsuite.resourceGroupName, testsuite.ipGroupsName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "key1": to.Ptr("value1"), + "key2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step IpGroups_Delete + fmt.Println("Call operation: IPGroups_Delete") + iPGroupsClientDeleteResponsePoller, err := iPGroupsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.ipGroupsName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, iPGroupsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/loadbalancer_live_test.go b/sdk/resourcemanager/network/armnetwork/loadbalancer_live_test.go new file mode 100644 index 000000000000..f6028154103a --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/loadbalancer_live_test.go @@ -0,0 +1,335 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type LoadBalancerTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + backendAddressPoolName string + frontendIPConfigurationId string + inboundNatRuleName string + loadBalancerName string + subnetId string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *LoadBalancerTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.backendAddressPoolName = testutil.GenerateAlphaNumericID(testsuite.T(), "backendadd", 6) + testsuite.inboundNatRuleName = testutil.GenerateAlphaNumericID(testsuite.T(), "inboundnat", 6) + testsuite.loadBalancerName = testutil.GenerateAlphaNumericID(testsuite.T(), "loadbalanc", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *LoadBalancerTestSuite) TearDownSuite() { + 
testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestLoadBalancerTestSuite(t *testing.T) { + suite.Run(t, new(LoadBalancerTestSuite)) +} + +func (testsuite *LoadBalancerTestSuite) Prepare() { + var err error + // From step VirtualNetworks_CreateOrUpdate + fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate") + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworksClientCreateOrUpdateResponsePoller, err := virtualNetworksClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, "test-vnet", armnetwork.VirtualNetwork{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("10.0.0.0/16")}, + }, + Subnets: []*armnetwork.Subnet{ + { + Name: to.Ptr("test-1"), + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr("10.0.0.0/24"), + }, + }}, + }, + }, nil) + testsuite.Require().NoError(err) + var virtualNetworksClientCreateOrUpdateResponse *armnetwork.VirtualNetworksClientCreateOrUpdateResponse + virtualNetworksClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, virtualNetworksClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.subnetId = *virtualNetworksClientCreateOrUpdateResponse.Properties.Subnets[0].ID + + // From step LoadBalancers_CreateOrUpdate + fmt.Println("Call operation: LoadBalancers_CreateOrUpdate") + loadBalancersClient, err := armnetwork.NewLoadBalancersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + loadBalancersClientCreateOrUpdateResponsePoller, err := 
loadBalancersClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, armnetwork.LoadBalancer{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.LoadBalancerPropertiesFormat{ + FrontendIPConfigurations: []*armnetwork.FrontendIPConfiguration{ + { + Name: to.Ptr("frontendipconf"), + Properties: &armnetwork.FrontendIPConfigurationPropertiesFormat{ + Subnet: &armnetwork.Subnet{ + ID: to.Ptr(testsuite.subnetId), + }, + }, + }}, + }, + SKU: &armnetwork.LoadBalancerSKU{ + Name: to.Ptr(armnetwork.LoadBalancerSKUNameStandard), + }, + }, nil) + testsuite.Require().NoError(err) + var loadBalancersClientCreateOrUpdateResponse *armnetwork.LoadBalancersClientCreateOrUpdateResponse + loadBalancersClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, loadBalancersClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.frontendIPConfigurationId = *loadBalancersClientCreateOrUpdateResponse.Properties.FrontendIPConfigurations[0].ID +} + +// Microsoft.Network/loadBalancers/{loadBalancerName} +func (testsuite *LoadBalancerTestSuite) TestLoadBalancers() { + var err error + // From step LoadBalancers_ListAll + fmt.Println("Call operation: LoadBalancers_ListAll") + loadBalancersClient, err := armnetwork.NewLoadBalancersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + loadBalancersClientNewListAllPager := loadBalancersClient.NewListAllPager(nil) + for loadBalancersClientNewListAllPager.More() { + _, err := loadBalancersClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step LoadBalancers_List + fmt.Println("Call operation: LoadBalancers_List") + loadBalancersClientNewListPager := loadBalancersClient.NewListPager(testsuite.resourceGroupName, nil) + for loadBalancersClientNewListPager.More() { + _, err := loadBalancersClientNewListPager.NextPage(testsuite.ctx) + 
testsuite.Require().NoError(err) + break + } + + // From step LoadBalancers_Get + fmt.Println("Call operation: LoadBalancers_Get") + _, err = loadBalancersClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, &armnetwork.LoadBalancersClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step LoadBalancers_UpdateTags + fmt.Println("Call operation: LoadBalancers_UpdateTags") + _, err = loadBalancersClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName} +func (testsuite *LoadBalancerTestSuite) TestLoadBalancerBackendAddressPools() { + var err error + // From step LoadBalancerBackendAddressPools_CreateOrUpdate + fmt.Println("Call operation: LoadBalancerBackendAddressPools_CreateOrUpdate") + loadBalancerBackendAddressPoolsClient, err := armnetwork.NewLoadBalancerBackendAddressPoolsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + loadBalancerBackendAddressPoolsClientCreateOrUpdateResponsePoller, err := loadBalancerBackendAddressPoolsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, testsuite.backendAddressPoolName, armnetwork.BackendAddressPool{ + Properties: &armnetwork.BackendAddressPoolPropertiesFormat{}, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, loadBalancerBackendAddressPoolsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step LoadBalancerBackendAddressPools_List + fmt.Println("Call operation: LoadBalancerBackendAddressPools_List") + loadBalancerBackendAddressPoolsClientNewListPager := 
loadBalancerBackendAddressPoolsClient.NewListPager(testsuite.resourceGroupName, testsuite.loadBalancerName, nil) + for loadBalancerBackendAddressPoolsClientNewListPager.More() { + _, err := loadBalancerBackendAddressPoolsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step LoadBalancerBackendAddressPools_Get + fmt.Println("Call operation: LoadBalancerBackendAddressPools_Get") + _, err = loadBalancerBackendAddressPoolsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, testsuite.backendAddressPoolName, nil) + testsuite.Require().NoError(err) + + // From step LoadBalancerBackendAddressPools_Delete + fmt.Println("Call operation: LoadBalancerBackendAddressPools_Delete") + loadBalancerBackendAddressPoolsClientDeleteResponsePoller, err := loadBalancerBackendAddressPoolsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, testsuite.backendAddressPoolName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, loadBalancerBackendAddressPoolsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/loadBalancers/{loadBalancerName}/frontendIPConfigurations +func (testsuite *LoadBalancerTestSuite) TestLoadBalancerFrontendIpConfigurations() { + var err error + // From step LoadBalancerFrontendIPConfigurations_List + fmt.Println("Call operation: LoadBalancerFrontendIPConfigurations_List") + loadBalancerFrontendIPConfigurationsClient, err := armnetwork.NewLoadBalancerFrontendIPConfigurationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + loadBalancerFrontendIPConfigurationsClientNewListPager := loadBalancerFrontendIPConfigurationsClient.NewListPager(testsuite.resourceGroupName, testsuite.loadBalancerName, nil) + for loadBalancerFrontendIPConfigurationsClientNewListPager.More() { + _, err := 
loadBalancerFrontendIPConfigurationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step LoadBalancerFrontendIPConfigurations_Get + fmt.Println("Call operation: LoadBalancerFrontendIPConfigurations_Get") + _, err = loadBalancerFrontendIPConfigurationsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, "frontendipconf", nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/loadBalancers/{loadBalancerName}/inboundNatRules/{inboundNatRuleName} +func (testsuite *LoadBalancerTestSuite) TestInboundNatRules() { + var err error + // From step InboundNatRules_CreateOrUpdate + fmt.Println("Call operation: InboundNatRules_CreateOrUpdate") + inboundNatRulesClient, err := armnetwork.NewInboundNatRulesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + inboundNatRulesClientCreateOrUpdateResponsePoller, err := inboundNatRulesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, testsuite.inboundNatRuleName, armnetwork.InboundNatRule{ + Properties: &armnetwork.InboundNatRulePropertiesFormat{ + BackendPort: to.Ptr[int32](3389), + EnableFloatingIP: to.Ptr(false), + EnableTCPReset: to.Ptr(false), + FrontendIPConfiguration: &armnetwork.SubResource{ + ID: to.Ptr(testsuite.frontendIPConfigurationId), + }, + FrontendPort: to.Ptr[int32](3390), + IdleTimeoutInMinutes: to.Ptr[int32](4), + Protocol: to.Ptr(armnetwork.TransportProtocolTCP), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, inboundNatRulesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step InboundNatRules_List + fmt.Println("Call operation: InboundNatRules_List") + inboundNatRulesClientNewListPager := inboundNatRulesClient.NewListPager(testsuite.resourceGroupName, testsuite.loadBalancerName, nil) + for inboundNatRulesClientNewListPager.More() { + _, err 
:= inboundNatRulesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step InboundNatRules_Get + fmt.Println("Call operation: InboundNatRules_Get") + _, err = inboundNatRulesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, testsuite.inboundNatRuleName, &armnetwork.InboundNatRulesClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step InboundNatRules_Delete + fmt.Println("Call operation: InboundNatRules_Delete") + inboundNatRulesClientDeleteResponsePoller, err := inboundNatRulesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, testsuite.inboundNatRuleName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, inboundNatRulesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/loadBalancers/{loadBalancerName}/loadBalancingRules +func (testsuite *LoadBalancerTestSuite) TestLoadBalancerLoadBalancingRules() { + var err error + // From step LoadBalancerLoadBalancingRules_List + fmt.Println("Call operation: LoadBalancerLoadBalancingRules_List") + loadBalancerLoadBalancingRulesClient, err := armnetwork.NewLoadBalancerLoadBalancingRulesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + loadBalancerLoadBalancingRulesClientNewListPager := loadBalancerLoadBalancingRulesClient.NewListPager(testsuite.resourceGroupName, testsuite.loadBalancerName, nil) + for loadBalancerLoadBalancingRulesClientNewListPager.More() { + _, err := loadBalancerLoadBalancingRulesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} + +// Microsoft.Network/loadBalancers/{loadBalancerName}/outboundRules +func (testsuite *LoadBalancerTestSuite) TestLoadBalancerOutboundRules() { + var err error + // From step LoadBalancerOutboundRules_List + fmt.Println("Call operation: 
LoadBalancerOutboundRules_List") + loadBalancerOutboundRulesClient, err := armnetwork.NewLoadBalancerOutboundRulesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + loadBalancerOutboundRulesClientNewListPager := loadBalancerOutboundRulesClient.NewListPager(testsuite.resourceGroupName, testsuite.loadBalancerName, nil) + for loadBalancerOutboundRulesClientNewListPager.More() { + _, err := loadBalancerOutboundRulesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} + +// Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces +func (testsuite *LoadBalancerTestSuite) TestLoadBalancerNetworkInterfaces() { + var err error + // From step LoadBalancerNetworkInterfaces_List + fmt.Println("Call operation: LoadBalancerNetworkInterfaces_List") + loadBalancerNetworkInterfacesClient, err := armnetwork.NewLoadBalancerNetworkInterfacesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + loadBalancerNetworkInterfacesClientNewListPager := loadBalancerNetworkInterfacesClient.NewListPager(testsuite.resourceGroupName, testsuite.loadBalancerName, nil) + for loadBalancerNetworkInterfacesClientNewListPager.More() { + _, err := loadBalancerNetworkInterfacesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} + +// Microsoft.Network/loadBalancers/{loadBalancerName}/probes +func (testsuite *LoadBalancerTestSuite) TestLoadBalancerProbes() { + var err error + // From step LoadBalancerProbes_List + fmt.Println("Call operation: LoadBalancerProbes_List") + loadBalancerProbesClient, err := armnetwork.NewLoadBalancerProbesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + loadBalancerProbesClientNewListPager := loadBalancerProbesClient.NewListPager(testsuite.resourceGroupName, testsuite.loadBalancerName, nil) + for 
loadBalancerProbesClientNewListPager.More() { + _, err := loadBalancerProbesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} + +func (testsuite *LoadBalancerTestSuite) Cleanup() { + var err error + // From step LoadBalancers_Delete + fmt.Println("Call operation: LoadBalancers_Delete") + loadBalancersClient, err := armnetwork.NewLoadBalancersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + loadBalancersClientDeleteResponsePoller, err := loadBalancersClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.loadBalancerName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, loadBalancersClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/natgateway_live_test.go b/sdk/resourcemanager/network/armnetwork/natgateway_live_test.go new file mode 100644 index 000000000000..5f23690950c6 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/natgateway_live_test.go @@ -0,0 +1,116 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NatGatewayTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + natGatewayName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NatGatewayTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.natGatewayName = testutil.GenerateAlphaNumericID(testsuite.T(), "natgateway", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *NatGatewayTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNatGatewayTestSuite(t *testing.T) { + suite.Run(t, new(NatGatewayTestSuite)) +} + +// 
Microsoft.Network/natGateways/{natGatewayName} +func (testsuite *NatGatewayTestSuite) TestNatGateways() { + var err error + // From step NatGateways_CreateOrUpdate + fmt.Println("Call operation: NatGateways_CreateOrUpdate") + natGatewaysClient, err := armnetwork.NewNatGatewaysClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + natGatewaysClientCreateOrUpdateResponsePoller, err := natGatewaysClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.natGatewayName, armnetwork.NatGateway{ + Location: to.Ptr(testsuite.location), + SKU: &armnetwork.NatGatewaySKU{ + Name: to.Ptr(armnetwork.NatGatewaySKUNameStandard), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, natGatewaysClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step NatGateways_ListAll + fmt.Println("Call operation: NatGateways_ListAll") + natGatewaysClientNewListAllPager := natGatewaysClient.NewListAllPager(nil) + for natGatewaysClientNewListAllPager.More() { + _, err := natGatewaysClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NatGateways_Get + fmt.Println("Call operation: NatGateways_Get") + _, err = natGatewaysClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.natGatewayName, &armnetwork.NatGatewaysClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step NatGateways_List + fmt.Println("Call operation: NatGateways_List") + natGatewaysClientNewListPager := natGatewaysClient.NewListPager(testsuite.resourceGroupName, nil) + for natGatewaysClientNewListPager.More() { + _, err := natGatewaysClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NatGateways_UpdateTags + fmt.Println("Call operation: NatGateways_UpdateTags") + _, err = natGatewaysClient.UpdateTags(testsuite.ctx, 
testsuite.resourceGroupName, testsuite.natGatewayName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step NatGateways_Delete + fmt.Println("Call operation: NatGateways_Delete") + natGatewaysClientDeleteResponsePoller, err := natGatewaysClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.natGatewayName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, natGatewaysClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/networkinterface_live_test.go b/sdk/resourcemanager/network/armnetwork/networkinterface_live_test.go new file mode 100644 index 000000000000..6dd8fe27300a --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/networkinterface_live_test.go @@ -0,0 +1,185 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NetworkInterfaceTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + networkInterfaceName string + subnetId string + virtualNetworkName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NetworkInterfaceTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.networkInterfaceName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkint", 6) + testsuite.virtualNetworkName = testutil.GenerateAlphaNumericID(testsuite.T(), "vnetinterfacena", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *NetworkInterfaceTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + 
testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNetworkInterfaceTestSuite(t *testing.T) { + suite.Run(t, new(NetworkInterfaceTestSuite)) +} + +func (testsuite *NetworkInterfaceTestSuite) Prepare() { + var err error + // From step VirtualNetworks_CreateOrUpdate + fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate") + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworksClientCreateOrUpdateResponsePoller, err := virtualNetworksClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, armnetwork.VirtualNetwork{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("10.0.0.0/16")}, + }, + Subnets: []*armnetwork.Subnet{ + { + Name: to.Ptr("test-1"), + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr("10.0.0.0/24"), + }, + }}, + }, + }, nil) + testsuite.Require().NoError(err) + var virtualNetworksClientCreateOrUpdateResponse *armnetwork.VirtualNetworksClientCreateOrUpdateResponse + virtualNetworksClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, virtualNetworksClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.subnetId = *virtualNetworksClientCreateOrUpdateResponse.Properties.Subnets[0].ID +} + +// Microsoft.Network/networkInterfaces/{networkInterfaceName} +func (testsuite *NetworkInterfaceTestSuite) TestNetworkInterfaces() { + var err error + // From step NetworkInterfaces_CreateOrUpdate + fmt.Println("Call operation: NetworkInterfaces_CreateOrUpdate") + interfacesClient, err := armnetwork.NewInterfacesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + interfacesClientCreateOrUpdateResponsePoller, err := 
interfacesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkInterfaceName, armnetwork.Interface{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.InterfacePropertiesFormat{ + EnableAcceleratedNetworking: to.Ptr(true), + IPConfigurations: []*armnetwork.InterfaceIPConfiguration{ + { + Name: to.Ptr("ipconfig1"), + Properties: &armnetwork.InterfaceIPConfigurationPropertiesFormat{ + Subnet: &armnetwork.Subnet{ + ID: to.Ptr(testsuite.subnetId), + }, + }, + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, interfacesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step NetworkInterfaces_Get + fmt.Println("Call operation: NetworkInterfaces_Get") + _, err = interfacesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkInterfaceName, &armnetwork.InterfacesClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step NetworkInterfaces_UpdateTags + fmt.Println("Call operation: NetworkInterfaces_UpdateTags") + _, err = interfacesClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkInterfaceName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step NetworkInterfaces_ListAll + fmt.Println("Call operation: NetworkInterfaces_ListAll") + interfacesClientNewListAllPager := interfacesClient.NewListAllPager(nil) + for interfacesClientNewListAllPager.More() { + _, err := interfacesClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkInterfaces_List + fmt.Println("Call operation: NetworkInterfaces_List") + interfacesClientNewListPager := interfacesClient.NewListPager(testsuite.resourceGroupName, nil) + for interfacesClientNewListPager.More() { + _, err := 
interfacesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkInterfaceIPConfigurations_List + fmt.Println("Call operation: NetworkInterfaceIPConfigurations_List") + interfaceIPConfigurationsClient, err := armnetwork.NewInterfaceIPConfigurationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + interfaceIPConfigurationsClientNewListPager := interfaceIPConfigurationsClient.NewListPager(testsuite.resourceGroupName, testsuite.networkInterfaceName, nil) + for interfaceIPConfigurationsClientNewListPager.More() { + _, err := interfaceIPConfigurationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkInterfaceIPConfigurations_Get + fmt.Println("Call operation: NetworkInterfaceIPConfigurations_Get") + _, err = interfaceIPConfigurationsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkInterfaceName, "ipconfig1", nil) + testsuite.Require().NoError(err) + + // From step NetworkInterfaceLoadBalancers_List + fmt.Println("Call operation: NetworkInterfaceLoadBalancers_List") + interfaceLoadBalancersClient, err := armnetwork.NewInterfaceLoadBalancersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + interfaceLoadBalancersClientNewListPager := interfaceLoadBalancersClient.NewListPager(testsuite.resourceGroupName, testsuite.networkInterfaceName, nil) + for interfaceLoadBalancersClientNewListPager.More() { + _, err := interfaceLoadBalancersClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkInterfaces_Delete + fmt.Println("Call operation: NetworkInterfaces_Delete") + interfacesClientDeleteResponsePoller, err := interfacesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkInterfaceName, nil) + testsuite.Require().NoError(err) + _, err = 
testutil.PollForTest(testsuite.ctx, interfacesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/networkmanager_live_test.go b/sdk/resourcemanager/network/armnetwork/networkmanager_live_test.go new file mode 100644 index 000000000000..b553a1f14a19 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/networkmanager_live_test.go @@ -0,0 +1,169 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NetworkManagerTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + networkManagerName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NetworkManagerTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.networkManagerName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkman", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", 
"scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *NetworkManagerTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNetworkManagerTestSuite(t *testing.T) { + suite.Run(t, new(NetworkManagerTestSuite)) +} + +// Microsoft.Network/networkManagers/{networkManagerName} +func (testsuite *NetworkManagerTestSuite) TestNetworkManagers() { + var err error + // From step NetworkManagers_CreateOrUpdate + fmt.Println("Call operation: NetworkManagers_CreateOrUpdate") + managersClient, err := armnetwork.NewManagersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = managersClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.Manager{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.ManagerProperties{ + Description: to.Ptr("My Test Network Manager"), + NetworkManagerScopeAccesses: []*armnetwork.ConfigurationType{ + to.Ptr(armnetwork.ConfigurationTypeConnectivity)}, + NetworkManagerScopes: &armnetwork.ManagerPropertiesNetworkManagerScopes{ + Subscriptions: []*string{ + to.Ptr("/subscriptions/" + testsuite.subscriptionId)}, + }, + }, + }, nil) + testsuite.Require().NoError(err) + + // From step NetworkManagers_ListBySubscription + fmt.Println("Call operation: NetworkManagers_ListBySubscription") + managersClientNewListBySubscriptionPager := 
managersClient.NewListBySubscriptionPager(&armnetwork.ManagersClientListBySubscriptionOptions{Top: nil, + SkipToken: nil, + }) + for managersClientNewListBySubscriptionPager.More() { + _, err := managersClientNewListBySubscriptionPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkManagers_List + fmt.Println("Call operation: NetworkManagers_List") + managersClientNewListPager := managersClient.NewListPager(testsuite.resourceGroupName, &armnetwork.ManagersClientListOptions{Top: nil, + SkipToken: nil, + }) + for managersClientNewListPager.More() { + _, err := managersClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkManagers_Get + fmt.Println("Call operation: NetworkManagers_Get") + _, err = managersClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, nil) + testsuite.Require().NoError(err) + + // From step NetworkManagers_Patch + fmt.Println("Call operation: NetworkManagers_Patch") + _, err = managersClient.Patch(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.PatchObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step NetworkManagerCommits_Post + fmt.Println("Call operation: NetworkManagerCommits_Post") + managerCommitsClient, err := armnetwork.NewManagerCommitsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + managerCommitsClientPostResponsePoller, err := managerCommitsClient.BeginPost(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.ManagerCommit{ + CommitType: to.Ptr(armnetwork.ConfigurationTypeConnectivity), + TargetLocations: []*string{ + to.Ptr("eastus")}, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, managerCommitsClientPostResponsePoller) + 
testsuite.Require().NoError(err) + + // From step NetworkManagerDeploymentStatus_List + fmt.Println("Call operation: NetworkManagerDeploymentStatus_List") + managerDeploymentStatusClient, err := armnetwork.NewManagerDeploymentStatusClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = managerDeploymentStatusClient.List(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.ManagerDeploymentStatusParameter{ + DeploymentTypes: []*armnetwork.ConfigurationType{ + to.Ptr(armnetwork.ConfigurationTypeConnectivity), + to.Ptr(armnetwork.ConfigurationTypeSecurityAdmin)}, + Regions: []*string{ + to.Ptr("eastus"), + to.Ptr("westus")}, + }, &armnetwork.ManagerDeploymentStatusClientListOptions{Top: nil}) + testsuite.Require().NoError(err) + + // From step ListActiveConnectivityConfigurations + fmt.Println("Call operation: NetworkManagementClient_ListActiveConnectivityConfigurations") + managementClient, err := armnetwork.NewManagementClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = managementClient.ListActiveConnectivityConfigurations(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.ActiveConfigurationParameter{ + Regions: []*string{ + to.Ptr("westus")}, + }, &armnetwork.ManagementClientListActiveConnectivityConfigurationsOptions{Top: nil}) + testsuite.Require().NoError(err) + + // From step ListActiveSecurityAdminRules + fmt.Println("Call operation: NetworkManagementClient_ListActiveSecurityAdminRules") + _, err = managementClient.ListActiveSecurityAdminRules(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.ActiveConfigurationParameter{ + Regions: []*string{ + to.Ptr("westus")}, + }, &armnetwork.ManagementClientListActiveSecurityAdminRulesOptions{Top: nil}) + testsuite.Require().NoError(err) + + // From step NetworkManagers_Delete + fmt.Println("Call 
operation: NetworkManagers_Delete") + managersClientDeleteResponsePoller, err := managersClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, &armnetwork.ManagersClientBeginDeleteOptions{Force: to.Ptr(true)}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, managersClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/networkmanagerconnection_live_test.go b/sdk/resourcemanager/network/armnetwork/networkmanagerconnection_live_test.go new file mode 100644 index 000000000000..cedbb8812208 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/networkmanagerconnection_live_test.go @@ -0,0 +1,121 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NetworkManagerConnectionTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + networkManagerConnectionName string + networkManagerId string + networkManagerName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NetworkManagerConnectionTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.networkManagerConnectionName = testutil.GenerateAlphaNumericID(testsuite.T(), "netmanagerconnsub", 6) + testsuite.networkManagerName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkmanagerconn", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *NetworkManagerConnectionTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, 
testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNetworkManagerConnectionTestSuite(t *testing.T) { + suite.Run(t, new(NetworkManagerConnectionTestSuite)) +} + +func (testsuite *NetworkManagerConnectionTestSuite) Prepare() { + var err error + // From step NetworkManagers_CreateOrUpdate + fmt.Println("Call operation: NetworkManagers_CreateOrUpdate") + managersClient, err := armnetwork.NewManagersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + managersClientCreateOrUpdateResponse, err := managersClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.Manager{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.ManagerProperties{ + Description: to.Ptr("My Test Network Manager"), + NetworkManagerScopeAccesses: []*armnetwork.ConfigurationType{ + to.Ptr(armnetwork.ConfigurationTypeConnectivity)}, + NetworkManagerScopes: &armnetwork.ManagerPropertiesNetworkManagerScopes{ + Subscriptions: []*string{ + to.Ptr("/subscriptions/" + testsuite.subscriptionId), + }, + }, + }, + }, nil) + testsuite.Require().NoError(err) + testsuite.networkManagerId = *managersClientCreateOrUpdateResponse.ID +} + +// Microsoft.Network/networkManagerConnections/{networkManagerConnectionName} +func (testsuite *NetworkManagerConnectionTestSuite) TestSubscriptionNetworkManagerConnections() { + var err error + // From step SubscriptionNetworkManagerConnections_CreateOrUpdate + fmt.Println("Call operation: SubscriptionNetworkManagerConnections_CreateOrUpdate") + subscriptionNetworkManagerConnectionsClient, err := armnetwork.NewSubscriptionNetworkManagerConnectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = subscriptionNetworkManagerConnectionsClient.CreateOrUpdate(testsuite.ctx, testsuite.networkManagerConnectionName, 
armnetwork.ManagerConnection{ + Properties: &armnetwork.ManagerConnectionProperties{ + NetworkManagerID: to.Ptr(testsuite.networkManagerId), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step SubscriptionNetworkManagerConnections_List + fmt.Println("Call operation: SubscriptionNetworkManagerConnections_List") + subscriptionNetworkManagerConnectionsClientNewListPager := subscriptionNetworkManagerConnectionsClient.NewListPager(&armnetwork.SubscriptionNetworkManagerConnectionsClientListOptions{Top: nil, + SkipToken: nil, + }) + for subscriptionNetworkManagerConnectionsClientNewListPager.More() { + _, err := subscriptionNetworkManagerConnectionsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SubscriptionNetworkManagerConnections_Get + fmt.Println("Call operation: SubscriptionNetworkManagerConnections_Get") + _, err = subscriptionNetworkManagerConnectionsClient.Get(testsuite.ctx, testsuite.networkManagerConnectionName, nil) + testsuite.Require().NoError(err) + + // From step SubscriptionNetworkManagerConnections_Delete + fmt.Println("Call operation: SubscriptionNetworkManagerConnections_Delete") + _, err = subscriptionNetworkManagerConnectionsClient.Delete(testsuite.ctx, testsuite.networkManagerConnectionName, nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/networkmanagerconnectivityconfiguration_live_test.go b/sdk/resourcemanager/network/armnetwork/networkmanagerconnectivityconfiguration_live_test.go new file mode 100644 index 000000000000..c76c78d04438 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/networkmanagerconnectivityconfiguration_live_test.go @@ -0,0 +1,145 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NetworkManagerConnectivityConfigurationTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + configurationName string + networkGroupId string + networkGroupName string + networkManagerName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NetworkManagerConnectivityConfigurationTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.configurationName = testutil.GenerateAlphaNumericID(testsuite.T(), "connectivityconf", 6) + testsuite.networkGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkgroconeconfig", 6) + testsuite.networkManagerName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkmancc", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = 
*resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *NetworkManagerConnectivityConfigurationTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNetworkManagerConnectivityConfigurationTestSuite(t *testing.T) { + suite.Run(t, new(NetworkManagerConnectivityConfigurationTestSuite)) +} + +func (testsuite *NetworkManagerConnectivityConfigurationTestSuite) Prepare() { + var err error + // From step NetworkManagers_CreateOrUpdate + fmt.Println("Call operation: NetworkManagers_CreateOrUpdate") + managersClient, err := armnetwork.NewManagersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = managersClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.Manager{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.ManagerProperties{ + Description: to.Ptr("My Test Network Manager"), + NetworkManagerScopeAccesses: []*armnetwork.ConfigurationType{ + to.Ptr(armnetwork.ConfigurationTypeConnectivity)}, + NetworkManagerScopes: &armnetwork.ManagerPropertiesNetworkManagerScopes{ + Subscriptions: []*string{ + to.Ptr("/subscriptions/" + testsuite.subscriptionId)}, + }, + }, + }, nil) + testsuite.Require().NoError(err) + + // From step NetworkGroups_CreateOrUpdate + fmt.Println("Call operation: NetworkGroups_CreateOrUpdate") + groupsClient, err := armnetwork.NewGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + groupsClientCreateOrUpdateResponse, err := groupsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.networkGroupName, armnetwork.Group{ + Properties: &armnetwork.GroupProperties{ + Description: to.Ptr("A sample group"), + 
}, + }, &armnetwork.GroupsClientCreateOrUpdateOptions{IfMatch: nil}) + testsuite.Require().NoError(err) + testsuite.networkGroupId = *groupsClientCreateOrUpdateResponse.ID +} + +// Microsoft.Network/networkManagers/{networkManagerName}/connectivityConfigurations/{configurationName} +func (testsuite *NetworkManagerConnectivityConfigurationTestSuite) TestConnectivityConfigurations() { + var err error + // From step ConnectivityConfigurations_CreateOrUpdate + fmt.Println("Call operation: ConnectivityConfigurations_CreateOrUpdate") + connectivityConfigurationsClient, err := armnetwork.NewConnectivityConfigurationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = connectivityConfigurationsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, armnetwork.ConnectivityConfiguration{ + Properties: &armnetwork.ConnectivityConfigurationProperties{ + Description: to.Ptr("Sample Configuration"), + AppliesToGroups: []*armnetwork.ConnectivityGroupItem{ + { + GroupConnectivity: to.Ptr(armnetwork.GroupConnectivityNone), + IsGlobal: to.Ptr(armnetwork.IsGlobalFalse), + NetworkGroupID: to.Ptr(testsuite.networkGroupId), + UseHubGateway: to.Ptr(armnetwork.UseHubGatewayTrue), + }}, + ConnectivityTopology: to.Ptr(armnetwork.ConnectivityTopologyMesh), + DeleteExistingPeering: to.Ptr(armnetwork.DeleteExistingPeeringTrue), + IsGlobal: to.Ptr(armnetwork.IsGlobalTrue), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step ConnectivityConfigurations_List + fmt.Println("Call operation: ConnectivityConfigurations_List") + connectivityConfigurationsClientNewListPager := connectivityConfigurationsClient.NewListPager(testsuite.resourceGroupName, testsuite.networkManagerName, &armnetwork.ConnectivityConfigurationsClientListOptions{Top: nil, + SkipToken: nil, + }) + for connectivityConfigurationsClientNewListPager.More() { + _, err := 
connectivityConfigurationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ConnectivityConfigurations_Get + fmt.Println("Call operation: ConnectivityConfigurations_Get") + _, err = connectivityConfigurationsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, nil) + testsuite.Require().NoError(err) + + // From step ConnectivityConfigurations_Delete + fmt.Println("Call operation: ConnectivityConfigurations_Delete") + connectivityConfigurationsClientDeleteResponsePoller, err := connectivityConfigurationsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, &armnetwork.ConnectivityConfigurationsClientBeginDeleteOptions{Force: to.Ptr(false)}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, connectivityConfigurationsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/networkmanagergroup_live_test.go b/sdk/resourcemanager/network/armnetwork/networkmanagergroup_live_test.go new file mode 100644 index 000000000000..9d1e513855b7 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/networkmanagergroup_live_test.go @@ -0,0 +1,188 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NetworkManagerGroupTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + networkGroupName string + networkManagerName string + staticMemberName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NetworkManagerGroupTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.networkGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkgro", 6) + testsuite.networkManagerName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkmanagergp", 6) + testsuite.staticMemberName = testutil.GenerateAlphaNumericID(testsuite.T(), "staticmemberna", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *NetworkManagerGroupTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := 
testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNetworkManagerGroupTestSuite(t *testing.T) { + suite.Run(t, new(NetworkManagerGroupTestSuite)) +} + +func (testsuite *NetworkManagerGroupTestSuite) Prepare() { + var err error + // From step NetworkManagers_CreateOrUpdate + fmt.Println("Call operation: NetworkManagers_CreateOrUpdate") + managersClient, err := armnetwork.NewManagersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = managersClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.Manager{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.ManagerProperties{ + Description: to.Ptr("My Test Network Manager"), + NetworkManagerScopeAccesses: []*armnetwork.ConfigurationType{ + to.Ptr(armnetwork.ConfigurationTypeConnectivity)}, + NetworkManagerScopes: &armnetwork.ManagerPropertiesNetworkManagerScopes{ + Subscriptions: []*string{ + to.Ptr("/subscriptions/" + testsuite.subscriptionId)}, + }, + }, + }, nil) + testsuite.Require().NoError(err) + + // From step NetworkGroups_CreateOrUpdate + fmt.Println("Call operation: NetworkGroups_CreateOrUpdate") + groupsClient, err := armnetwork.NewGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = groupsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.networkGroupName, armnetwork.Group{ + Properties: &armnetwork.GroupProperties{ + Description: to.Ptr("A sample group"), + }, + }, &armnetwork.GroupsClientCreateOrUpdateOptions{IfMatch: nil}) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/networkManagers/{networkManagerName}/networkGroups/{networkGroupName} +func (testsuite 
*NetworkManagerGroupTestSuite) TestNetworkGroups() {
+	var err error
+	// From step NetworkGroups_List
+	fmt.Println("Call operation: NetworkGroups_List")
+	groupsClient, err := armnetwork.NewGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options)
+	testsuite.Require().NoError(err)
+	groupsClientNewListPager := groupsClient.NewListPager(testsuite.resourceGroupName, testsuite.networkManagerName, &armnetwork.GroupsClientListOptions{Top: nil,
+		SkipToken: nil,
+	})
+	for groupsClientNewListPager.More() {
+		_, err := groupsClientNewListPager.NextPage(testsuite.ctx)
+		testsuite.Require().NoError(err)
+		break
+	}
+
+	// From step NetworkGroups_Get
+	fmt.Println("Call operation: NetworkGroups_Get")
+	_, err = groupsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.networkGroupName, nil)
+	testsuite.Require().NoError(err)
+}
+
+// Microsoft.Network/networkManagers/{networkManagerName}/networkGroups/{networkGroupName}/staticMembers/{staticMemberName}
+func (testsuite *NetworkManagerGroupTestSuite) TestStaticMembers() {
+	virtualNetworkName := testutil.GenerateAlphaNumericID(testsuite.T(), "networkgrovet", 6)
+	var virtualNetworkId string
+	var err error
+	// From step VirtualNetworks_CreateOrUpdate
+	fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate")
+	virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options)
+	testsuite.Require().NoError(err)
+	virtualNetworksClientCreateOrUpdateResponsePoller, err := virtualNetworksClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, virtualNetworkName, armnetwork.VirtualNetwork{
+		Location: to.Ptr(testsuite.location),
+		Properties: &armnetwork.VirtualNetworkPropertiesFormat{
+			AddressSpace: &armnetwork.AddressSpace{
+				AddressPrefixes: []*string{
+					to.Ptr("10.0.0.0/16")},
+			},
+			FlowTimeoutInMinutes: to.Ptr[int32](10),
+		},
+	}, nil)
+	testsuite.Require().NoError(err)
+	var virtualNetworksClientCreateOrUpdateResponse *armnetwork.VirtualNetworksClientCreateOrUpdateResponse
+	virtualNetworksClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, virtualNetworksClientCreateOrUpdateResponsePoller)
+	testsuite.Require().NoError(err)
+	virtualNetworkId = *virtualNetworksClientCreateOrUpdateResponse.ID
+
+	// From step StaticMembers_CreateOrUpdate
+	fmt.Println("Call operation: StaticMembers_CreateOrUpdate")
+	staticMembersClient, err := armnetwork.NewStaticMembersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options)
+	testsuite.Require().NoError(err)
+	_, err = staticMembersClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.networkGroupName, testsuite.staticMemberName, armnetwork.StaticMember{
+		Properties: &armnetwork.StaticMemberProperties{
+			ResourceID: to.Ptr(virtualNetworkId),
+		},
+	}, nil)
+	testsuite.Require().NoError(err)
+
+	// From step StaticMembers_List
+	fmt.Println("Call operation: StaticMembers_List")
+	staticMembersClientNewListPager := staticMembersClient.NewListPager(testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.networkGroupName, &armnetwork.StaticMembersClientListOptions{Top: nil,
+		SkipToken: nil,
+	})
+	for staticMembersClientNewListPager.More() {
+		_, err := staticMembersClientNewListPager.NextPage(testsuite.ctx)
+		testsuite.Require().NoError(err)
+		break
+	}
+
+	// From step StaticMembers_Get
+	fmt.Println("Call operation: StaticMembers_Get")
+	_, err = staticMembersClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.networkGroupName, testsuite.staticMemberName, nil)
+	testsuite.Require().NoError(err)
+
+	// From step StaticMembers_Delete
+	fmt.Println("Call operation: StaticMembers_Delete")
+	_, err = staticMembersClient.Delete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.networkGroupName, testsuite.staticMemberName, nil)
+	
testsuite.Require().NoError(err) +} + +func (testsuite *NetworkManagerGroupTestSuite) Cleanup() { + var err error + // From step NetworkGroups_Delete + fmt.Println("Call operation: NetworkGroups_Delete") + groupsClient, err := armnetwork.NewGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + groupsClientDeleteResponsePoller, err := groupsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.networkGroupName, &armnetwork.GroupsClientBeginDeleteOptions{Force: to.Ptr(false)}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, groupsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/networkmanagersecurityadminconfiguration_live_test.go b/sdk/resourcemanager/network/armnetwork/networkmanagersecurityadminconfiguration_live_test.go new file mode 100644 index 000000000000..389e7bfa8603 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/networkmanagersecurityadminconfiguration_live_test.go @@ -0,0 +1,252 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NetworkManagerSecurityAdminConfigurationTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + configurationName string + networkGroupId string + networkGroupName string + networkManagerName string + ruleCollectionName string + ruleName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NetworkManagerSecurityAdminConfigurationTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.configurationName = testutil.GenerateAlphaNumericID(testsuite.T(), "configurationsecurity", 6) + testsuite.networkGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkgrosecurity", 6) + testsuite.networkManagerName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkmanagersecurity", 6) + testsuite.ruleCollectionName = testutil.GenerateAlphaNumericID(testsuite.T(), "rulecollec", 6) + testsuite.ruleName = testutil.GenerateAlphaNumericID(testsuite.T(), "rulename", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := 
testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *NetworkManagerSecurityAdminConfigurationTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNetworkManagerSecurityAdminConfigurationTestSuite(t *testing.T) { + suite.Run(t, new(NetworkManagerSecurityAdminConfigurationTestSuite)) +} + +func (testsuite *NetworkManagerSecurityAdminConfigurationTestSuite) Prepare() { + var err error + // From step NetworkManagers_CreateOrUpdate + fmt.Println("Call operation: NetworkManagers_CreateOrUpdate") + managersClient, err := armnetwork.NewManagersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = managersClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, armnetwork.Manager{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.ManagerProperties{ + Description: to.Ptr("My Test Network Manager"), + NetworkManagerScopeAccesses: []*armnetwork.ConfigurationType{ + to.Ptr(armnetwork.ConfigurationTypeSecurityAdmin)}, + NetworkManagerScopes: &armnetwork.ManagerPropertiesNetworkManagerScopes{ + Subscriptions: []*string{ + to.Ptr("/subscriptions/" + testsuite.subscriptionId)}, + }, + }, + }, nil) + testsuite.Require().NoError(err) + + // From step NetworkGroups_CreateOrUpdate + fmt.Println("Call operation: NetworkGroups_CreateOrUpdate") + groupsClient, err := armnetwork.NewGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + groupsClientCreateOrUpdateResponse, err := 
groupsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.networkGroupName, armnetwork.Group{ + Properties: &armnetwork.GroupProperties{ + Description: to.Ptr("A sample group"), + }, + }, &armnetwork.GroupsClientCreateOrUpdateOptions{IfMatch: nil}) + testsuite.Require().NoError(err) + testsuite.networkGroupId = *groupsClientCreateOrUpdateResponse.ID + + // From step SecurityAdminConfigurations_CreateOrUpdate + fmt.Println("Call operation: SecurityAdminConfigurations_CreateOrUpdate") + securityAdminConfigurationsClient, err := armnetwork.NewSecurityAdminConfigurationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = securityAdminConfigurationsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, armnetwork.SecurityAdminConfiguration{ + Properties: &armnetwork.SecurityAdminConfigurationPropertiesFormat{ + Description: to.Ptr("A sample policy"), + ApplyOnNetworkIntentPolicyBasedServices: []*armnetwork.NetworkIntentPolicyBasedService{ + to.Ptr(armnetwork.NetworkIntentPolicyBasedServiceNone)}, + }, + }, nil) + testsuite.Require().NoError(err) + + // From step AdminRuleCollections_CreateOrUpdate + fmt.Println("Call operation: AdminRuleCollections_CreateOrUpdate") + adminRuleCollectionsClient, err := armnetwork.NewAdminRuleCollectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = adminRuleCollectionsClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, testsuite.ruleCollectionName, armnetwork.AdminRuleCollection{ + Properties: &armnetwork.AdminRuleCollectionPropertiesFormat{ + Description: to.Ptr("A sample policy"), + AppliesToGroups: []*armnetwork.ManagerSecurityGroupItem{ + { + NetworkGroupID: to.Ptr(testsuite.networkGroupId), + }}, + }, + }, nil) + 
testsuite.Require().NoError(err) +} + +// Microsoft.Network/networkManagers/{networkManagerName}/securityAdminConfigurations/{configurationName} +func (testsuite *NetworkManagerSecurityAdminConfigurationTestSuite) TestSecurityAdminConfigurations() { + var err error + // From step SecurityAdminConfigurations_List + fmt.Println("Call operation: SecurityAdminConfigurations_List") + securityAdminConfigurationsClient, err := armnetwork.NewSecurityAdminConfigurationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + securityAdminConfigurationsClientNewListPager := securityAdminConfigurationsClient.NewListPager(testsuite.resourceGroupName, testsuite.networkManagerName, &armnetwork.SecurityAdminConfigurationsClientListOptions{Top: nil, + SkipToken: nil, + }) + for securityAdminConfigurationsClientNewListPager.More() { + _, err := securityAdminConfigurationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SecurityAdminConfigurations_Get + fmt.Println("Call operation: SecurityAdminConfigurations_Get") + _, err = securityAdminConfigurationsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/networkManagers/{networkManagerName}/securityAdminConfigurations/{configurationName}/ruleCollections/{ruleCollectionName} +func (testsuite *NetworkManagerSecurityAdminConfigurationTestSuite) TestAdminRuleCollections() { + var err error + // From step AdminRuleCollections_List + fmt.Println("Call operation: AdminRuleCollections_List") + adminRuleCollectionsClient, err := armnetwork.NewAdminRuleCollectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + adminRuleCollectionsClientNewListPager := adminRuleCollectionsClient.NewListPager(testsuite.resourceGroupName, testsuite.networkManagerName, 
testsuite.configurationName, &armnetwork.AdminRuleCollectionsClientListOptions{Top: nil, + SkipToken: nil, + }) + for adminRuleCollectionsClientNewListPager.More() { + _, err := adminRuleCollectionsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step AdminRuleCollections_Get + fmt.Println("Call operation: AdminRuleCollections_Get") + _, err = adminRuleCollectionsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, testsuite.ruleCollectionName, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/networkManagers/{networkManagerName}/securityAdminConfigurations/{configurationName}/ruleCollections/{ruleCollectionName}/rules/{ruleName} +func (testsuite *NetworkManagerSecurityAdminConfigurationTestSuite) TestAdminRules() { + var err error + // From step AdminRules_CreateOrUpdate + fmt.Println("Call operation: AdminRules_CreateOrUpdate") + adminRulesClient, err := armnetwork.NewAdminRulesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = adminRulesClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, testsuite.ruleCollectionName, testsuite.ruleName, &armnetwork.AdminRule{ + Kind: to.Ptr(armnetwork.AdminRuleKindCustom), + Properties: &armnetwork.AdminPropertiesFormat{ + Description: to.Ptr("This is Sample Admin Rule"), + Access: to.Ptr(armnetwork.SecurityConfigurationRuleAccessDeny), + DestinationPortRanges: []*string{ + to.Ptr("22")}, + Destinations: []*armnetwork.AddressPrefixItem{ + { + AddressPrefix: to.Ptr("*"), + AddressPrefixType: to.Ptr(armnetwork.AddressPrefixTypeIPPrefix), + }}, + Direction: to.Ptr(armnetwork.SecurityConfigurationRuleDirectionInbound), + Priority: to.Ptr[int32](1), + SourcePortRanges: []*string{ + to.Ptr("0-65535")}, + Sources: []*armnetwork.AddressPrefixItem{ + { + AddressPrefix: 
to.Ptr("Internet"), + AddressPrefixType: to.Ptr(armnetwork.AddressPrefixTypeServiceTag), + }}, + Protocol: to.Ptr(armnetwork.SecurityConfigurationRuleProtocolTCP), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step AdminRules_List + fmt.Println("Call operation: AdminRules_List") + adminRulesClientNewListPager := adminRulesClient.NewListPager(testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, testsuite.ruleCollectionName, &armnetwork.AdminRulesClientListOptions{Top: nil, + SkipToken: nil, + }) + for adminRulesClientNewListPager.More() { + _, err := adminRulesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step AdminRules_Get + fmt.Println("Call operation: AdminRules_Get") + _, err = adminRulesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, testsuite.ruleCollectionName, testsuite.ruleName, nil) + testsuite.Require().NoError(err) + + // From step AdminRules_Delete + fmt.Println("Call operation: AdminRules_Delete") + adminRulesClientDeleteResponsePoller, err := adminRulesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, testsuite.ruleCollectionName, testsuite.ruleName, &armnetwork.AdminRulesClientBeginDeleteOptions{Force: to.Ptr(false)}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, adminRulesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *NetworkManagerSecurityAdminConfigurationTestSuite) Cleanup() { + var err error + // From step AdminRuleCollections_Delete + fmt.Println("Call operation: AdminRuleCollections_Delete") + adminRuleCollectionsClient, err := armnetwork.NewAdminRuleCollectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + adminRuleCollectionsClientDeleteResponsePoller, err := 
adminRuleCollectionsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, testsuite.ruleCollectionName, &armnetwork.AdminRuleCollectionsClientBeginDeleteOptions{Force: to.Ptr(false)}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, adminRuleCollectionsClientDeleteResponsePoller) + testsuite.Require().NoError(err) + + // From step SecurityAdminConfigurations_Delete + fmt.Println("Call operation: SecurityAdminConfigurations_Delete") + securityAdminConfigurationsClient, err := armnetwork.NewSecurityAdminConfigurationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + securityAdminConfigurationsClientDeleteResponsePoller, err := securityAdminConfigurationsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkManagerName, testsuite.configurationName, &armnetwork.SecurityAdminConfigurationsClientBeginDeleteOptions{Force: to.Ptr(false)}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, securityAdminConfigurationsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/networkprofile_live_test.go b/sdk/resourcemanager/network/armnetwork/networkprofile_live_test.go new file mode 100644 index 000000000000..8e9874441868 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/networkprofile_live_test.go @@ -0,0 +1,161 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NetworkProfileTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + networkProfileName string + subnetId string + virtualNetworkName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NetworkProfileTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.networkProfileName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkpro", 6) + testsuite.virtualNetworkName = testutil.GenerateAlphaNumericID(testsuite.T(), "vnetprofilena", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *NetworkProfileTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + 
testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNetworkProfileTestSuite(t *testing.T) { + suite.Run(t, new(NetworkProfileTestSuite)) +} + +func (testsuite *NetworkProfileTestSuite) Prepare() { + var err error + // From step VirtualNetworks_CreateOrUpdate + fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate") + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworksClientCreateOrUpdateResponsePoller, err := virtualNetworksClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, armnetwork.VirtualNetwork{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("10.0.0.0/16")}, + }, + Subnets: []*armnetwork.Subnet{ + { + Name: to.Ptr("test-1"), + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr("10.0.0.0/24"), + }, + }}, + }, + }, nil) + testsuite.Require().NoError(err) + var virtualNetworksClientCreateOrUpdateResponse *armnetwork.VirtualNetworksClientCreateOrUpdateResponse + virtualNetworksClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, virtualNetworksClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.subnetId = *virtualNetworksClientCreateOrUpdateResponse.Properties.Subnets[0].ID +} + +// Microsoft.Network/networkProfiles/{networkProfileName} +func (testsuite *NetworkProfileTestSuite) TestNetworkProfiles() { + var err error + // From step NetworkProfiles_CreateOrUpdate + fmt.Println("Call operation: NetworkProfiles_CreateOrUpdate") + profilesClient, err := armnetwork.NewProfilesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = profilesClient.CreateOrUpdate(testsuite.ctx, 
testsuite.resourceGroupName, testsuite.networkProfileName, armnetwork.Profile{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.ProfilePropertiesFormat{ + ContainerNetworkInterfaceConfigurations: []*armnetwork.ContainerNetworkInterfaceConfiguration{ + { + Name: to.Ptr("eth1"), + Properties: &armnetwork.ContainerNetworkInterfaceConfigurationPropertiesFormat{ + IPConfigurations: []*armnetwork.IPConfigurationProfile{ + { + Name: to.Ptr("ipconfig1"), + Properties: &armnetwork.IPConfigurationProfilePropertiesFormat{ + Subnet: &armnetwork.Subnet{ + ID: to.Ptr(testsuite.subnetId), + }, + }, + }}, + }, + }}, + }, + }, nil) + testsuite.Require().NoError(err) + + // From step NetworkProfiles_ListAll + fmt.Println("Call operation: NetworkProfiles_ListAll") + profilesClientNewListAllPager := profilesClient.NewListAllPager(nil) + for profilesClientNewListAllPager.More() { + _, err := profilesClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkProfiles_List + fmt.Println("Call operation: NetworkProfiles_List") + profilesClientNewListPager := profilesClient.NewListPager(testsuite.resourceGroupName, nil) + for profilesClientNewListPager.More() { + _, err := profilesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkProfiles_Get + fmt.Println("Call operation: NetworkProfiles_Get") + _, err = profilesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkProfileName, &armnetwork.ProfilesClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step NetworkProfiles_UpdateTags + fmt.Println("Call operation: NetworkProfiles_UpdateTags") + _, err = profilesClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkProfileName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // 
From step NetworkProfiles_Delete + fmt.Println("Call operation: NetworkProfiles_Delete") + profilesClientDeleteResponsePoller, err := profilesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkProfileName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, profilesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/networksecuritygroup_live_test.go b/sdk/resourcemanager/network/armnetwork/networksecuritygroup_live_test.go new file mode 100644 index 000000000000..750388bc491f --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/networksecuritygroup_live_test.go @@ -0,0 +1,191 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NetworkSecurityGroupTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + networkSecurityGroupName string + securityRuleName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NetworkSecurityGroupTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.networkSecurityGroupName = testutil.GenerateAlphaNumericID(testsuite.T(), "networksec", 6) + testsuite.securityRuleName = testutil.GenerateAlphaNumericID(testsuite.T(), "securityru", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *NetworkSecurityGroupTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) 
+ testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNetworkSecurityGroupTestSuite(t *testing.T) { + suite.Run(t, new(NetworkSecurityGroupTestSuite)) +} + +func (testsuite *NetworkSecurityGroupTestSuite) Prepare() { + var err error + // From step NetworkSecurityGroups_CreateOrUpdate + fmt.Println("Call operation: NetworkSecurityGroups_CreateOrUpdate") + securityGroupsClient, err := armnetwork.NewSecurityGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + securityGroupsClientCreateOrUpdateResponsePoller, err := securityGroupsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkSecurityGroupName, armnetwork.SecurityGroup{ + Location: to.Ptr(testsuite.location), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, securityGroupsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName} +func (testsuite *NetworkSecurityGroupTestSuite) TestNetworkSecurityGroups() { + var err error + // From step NetworkSecurityGroups_ListAll + fmt.Println("Call operation: NetworkSecurityGroups_ListAll") + securityGroupsClient, err := armnetwork.NewSecurityGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + securityGroupsClientNewListAllPager := securityGroupsClient.NewListAllPager(nil) + for securityGroupsClientNewListAllPager.More() { + _, err := securityGroupsClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkSecurityGroups_List + fmt.Println("Call operation: NetworkSecurityGroups_List") + securityGroupsClientNewListPager := securityGroupsClient.NewListPager(testsuite.resourceGroupName, nil) + for securityGroupsClientNewListPager.More() { + _, err := securityGroupsClientNewListPager.NextPage(testsuite.ctx) + 
testsuite.Require().NoError(err) + break + } + + // From step NetworkSecurityGroups_Get + fmt.Println("Call operation: NetworkSecurityGroups_Get") + _, err = securityGroupsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkSecurityGroupName, &armnetwork.SecurityGroupsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step NetworkSecurityGroups_UpdateTags + fmt.Println("Call operation: NetworkSecurityGroups_UpdateTags") + _, err = securityGroupsClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkSecurityGroupName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName} +func (testsuite *NetworkSecurityGroupTestSuite) TestSecurityRules() { + var defaultSecurityRuleName string + var err error + // From step SecurityRules_CreateOrUpdate + fmt.Println("Call operation: SecurityRules_CreateOrUpdate") + securityRulesClient, err := armnetwork.NewSecurityRulesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + securityRulesClientCreateOrUpdateResponsePoller, err := securityRulesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkSecurityGroupName, testsuite.securityRuleName, armnetwork.SecurityRule{ + Properties: &armnetwork.SecurityRulePropertiesFormat{ + Access: to.Ptr(armnetwork.SecurityRuleAccessDeny), + DestinationAddressPrefix: to.Ptr("11.0.0.0/8"), + DestinationPortRange: to.Ptr("8080"), + Direction: to.Ptr(armnetwork.SecurityRuleDirectionOutbound), + Priority: to.Ptr[int32](100), + SourceAddressPrefix: to.Ptr("10.0.0.0/8"), + SourcePortRange: to.Ptr("*"), + Protocol: to.Ptr(armnetwork.SecurityRuleProtocolAsterisk), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = 
testutil.PollForTest(testsuite.ctx, securityRulesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step SecurityRules_List + fmt.Println("Call operation: SecurityRules_List") + securityRulesClientNewListPager := securityRulesClient.NewListPager(testsuite.resourceGroupName, testsuite.networkSecurityGroupName, nil) + for securityRulesClientNewListPager.More() { + _, err := securityRulesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SecurityRules_Get + fmt.Println("Call operation: SecurityRules_Get") + _, err = securityRulesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkSecurityGroupName, testsuite.securityRuleName, nil) + testsuite.Require().NoError(err) + + // From step DefaultSecurityRules_List + fmt.Println("Call operation: DefaultSecurityRules_List") + defaultSecurityRulesClient, err := armnetwork.NewDefaultSecurityRulesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + defaultSecurityRulesClientNewListPager := defaultSecurityRulesClient.NewListPager(testsuite.resourceGroupName, testsuite.networkSecurityGroupName, nil) + for defaultSecurityRulesClientNewListPager.More() { + nextResult, err := defaultSecurityRulesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + + defaultSecurityRuleName = *nextResult.Value[0].Name + break + } + + // From step DefaultSecurityRules_Get + fmt.Println("Call operation: DefaultSecurityRules_Get") + _, err = defaultSecurityRulesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkSecurityGroupName, defaultSecurityRuleName, nil) + testsuite.Require().NoError(err) + + // From step SecurityRules_Delete + fmt.Println("Call operation: SecurityRules_Delete") + securityRulesClientDeleteResponsePoller, err := securityRulesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkSecurityGroupName, 
testsuite.securityRuleName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, securityRulesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *NetworkSecurityGroupTestSuite) Cleanup() { + var err error + // From step NetworkSecurityGroups_Delete + fmt.Println("Call operation: NetworkSecurityGroups_Delete") + securityGroupsClient, err := armnetwork.NewSecurityGroupsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + securityGroupsClientDeleteResponsePoller, err := securityGroupsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkSecurityGroupName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, securityGroupsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/networkwatcher_live_test.go b/sdk/resourcemanager/network/armnetwork/networkwatcher_live_test.go new file mode 100644 index 000000000000..0af672b6402d --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/networkwatcher_live_test.go @@ -0,0 +1,113 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type NetworkWatcherTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + networkWatcherName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *NetworkWatcherTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.networkWatcherName = testutil.GenerateAlphaNumericID(testsuite.T(), "networkwat", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *NetworkWatcherTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestNetworkWatcherTestSuite(t *testing.T) { + suite.Run(t, new(NetworkWatcherTestSuite)) +} + +// 
Microsoft.Network/networkWatchers/{networkWatcherName} +func (testsuite *NetworkWatcherTestSuite) TestNetworkWatchers() { + var err error + + // From step NetworkWatchers_CreateOrUpdate + watchersClient, err := armnetwork.NewWatchersClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + fmt.Println("Call operation: NetworkWatchers_CreateOrUpdate") + _, err = watchersClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkWatcherName, armnetwork.Watcher{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.WatcherPropertiesFormat{}, + }, nil) + testsuite.Require().NoError(err) + + // From step NetworkWatchers_ListAll + fmt.Println("Call operation: NetworkWatchers_ListAll") + watchersClientNewListAllPager := watchersClient.NewListAllPager(nil) + for watchersClientNewListAllPager.More() { + _, err := watchersClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkWatchers_List + fmt.Println("Call operation: NetworkWatchers_List") + watchersClientNewListPager := watchersClient.NewListPager(testsuite.resourceGroupName, nil) + for watchersClientNewListPager.More() { + _, err := watchersClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step NetworkWatchers_Get + fmt.Println("Call operation: NetworkWatchers_Get") + _, err = watchersClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkWatcherName, nil) + testsuite.Require().NoError(err) + + // From step NetworkWatchers_UpdateTags + fmt.Println("Call operation: NetworkWatchers_UpdateTags") + _, err = watchersClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkWatcherName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step NetworkWatchers_Delete + 
fmt.Println("Call operation: NetworkWatchers_Delete") + watchersClientDeleteResponsePoller, err := watchersClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.networkWatcherName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, watchersClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/operation_live_test.go b/sdk/resourcemanager/network/armnetwork/operation_live_test.go new file mode 100644 index 000000000000..3d6327188031 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/operation_live_test.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type OperationTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *OperationTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = 
testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup")
+	testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000")
+	resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location)
+	testsuite.Require().NoError(err)
+	testsuite.resourceGroupName = *resourceGroup.Name
+}
+
+func (testsuite *OperationTestSuite) TearDownSuite() {
+	_, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName)
+	testsuite.Require().NoError(err)
+	testutil.StopRecording(testsuite.T())
+}
+
+func TestOperationTestSuite(t *testing.T) {
+	suite.Run(t, new(OperationTestSuite))
+}
+
+// Microsoft.Network/operations
+func (testsuite *OperationTestSuite) TestOperations() {
+	var err error
+	// From step Operations_List
+	fmt.Println("Call operation: Operations_List")
+	operationsClient, err := armnetwork.NewOperationsClient(testsuite.cred, testsuite.options)
+	testsuite.Require().NoError(err)
+	operationsClientNewListPager := operationsClient.NewListPager(nil)
+	for operationsClientNewListPager.More() {
+		_, err := operationsClientNewListPager.NextPage(testsuite.ctx)
+		testsuite.Require().NoError(err)
+		break
+	}
+}
diff --git a/sdk/resourcemanager/network/armnetwork/publicipaddress_live_test.go b/sdk/resourcemanager/network/armnetwork/publicipaddress_live_test.go
new file mode 100644
index 000000000000..1272f29835f5
--- /dev/null
+++ b/sdk/resourcemanager/network/armnetwork/publicipaddress_live_test.go
@@ -0,0 +1,113 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type PublicIpAddressTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + publicIpAddressName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *PublicIpAddressTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.publicIpAddressName = testutil.GenerateAlphaNumericID(testsuite.T(), "publicipad", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *PublicIpAddressTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestPublicIpAddressTestSuite(t *testing.T) { + suite.Run(t, new(PublicIpAddressTestSuite)) +} + +// 
Microsoft.Network/publicIPAddresses/{publicIpAddressName} +func (testsuite *PublicIpAddressTestSuite) TestPublicIpAddresses() { + var err error + // From step PublicIPAddresses_CreateOrUpdate + fmt.Println("Call operation: PublicIPAddresses_CreateOrUpdate") + publicIPAddressesClient, err := armnetwork.NewPublicIPAddressesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + publicIPAddressesClientCreateOrUpdateResponsePoller, err := publicIPAddressesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.publicIpAddressName, armnetwork.PublicIPAddress{ + Location: to.Ptr(testsuite.location), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, publicIPAddressesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step PublicIPAddresses_ListAll + fmt.Println("Call operation: PublicIPAddresses_ListAll") + publicIPAddressesClientNewListAllPager := publicIPAddressesClient.NewListAllPager(nil) + for publicIPAddressesClientNewListAllPager.More() { + _, err := publicIPAddressesClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step PublicIPAddresses_List + fmt.Println("Call operation: PublicIPAddresses_List") + publicIPAddressesClientNewListPager := publicIPAddressesClient.NewListPager(testsuite.resourceGroupName, nil) + for publicIPAddressesClientNewListPager.More() { + _, err := publicIPAddressesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step PublicIPAddresses_Get + fmt.Println("Call operation: PublicIPAddresses_Get") + _, err = publicIPAddressesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.publicIpAddressName, &armnetwork.PublicIPAddressesClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step PublicIPAddresses_UpdateTags + fmt.Println("Call operation: 
PublicIPAddresses_UpdateTags") + _, err = publicIPAddressesClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.publicIpAddressName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step PublicIPAddresses_Delete + fmt.Println("Call operation: PublicIPAddresses_Delete") + publicIPAddressesClientDeleteResponsePoller, err := publicIPAddressesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.publicIpAddressName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, publicIPAddressesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/publicipprefix_live_test.go b/sdk/resourcemanager/network/armnetwork/publicipprefix_live_test.go new file mode 100644 index 000000000000..e6e675a993f8 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/publicipprefix_live_test.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type PublicIpPrefixTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + publicIpPrefixName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *PublicIpPrefixTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.publicIpPrefixName = testutil.GenerateAlphaNumericID(testsuite.T(), "publicippr", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *PublicIpPrefixTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestPublicIpPrefixTestSuite(t *testing.T) { + suite.Run(t, new(PublicIpPrefixTestSuite)) +} + +// 
Microsoft.Network/publicIPPrefixes/{publicIpPrefixName} +func (testsuite *PublicIpPrefixTestSuite) TestPublicIpPrefixes() { + var err error + // From step PublicIPPrefixes_CreateOrUpdate + fmt.Println("Call operation: PublicIPPrefixes_CreateOrUpdate") + publicIPPrefixesClient, err := armnetwork.NewPublicIPPrefixesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + publicIPPrefixesClientCreateOrUpdateResponsePoller, err := publicIPPrefixesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.publicIpPrefixName, armnetwork.PublicIPPrefix{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.PublicIPPrefixPropertiesFormat{ + PrefixLength: to.Ptr[int32](30), + }, + SKU: &armnetwork.PublicIPPrefixSKU{ + Name: to.Ptr(armnetwork.PublicIPPrefixSKUNameStandard), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, publicIPPrefixesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step PublicIPPrefixes_ListAll + fmt.Println("Call operation: PublicIPPrefixes_ListAll") + publicIPPrefixesClientNewListAllPager := publicIPPrefixesClient.NewListAllPager(nil) + for publicIPPrefixesClientNewListAllPager.More() { + _, err := publicIPPrefixesClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step PublicIPPrefixes_List + fmt.Println("Call operation: PublicIPPrefixes_List") + publicIPPrefixesClientNewListPager := publicIPPrefixesClient.NewListPager(testsuite.resourceGroupName, nil) + for publicIPPrefixesClientNewListPager.More() { + _, err := publicIPPrefixesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step PublicIPPrefixes_Get + fmt.Println("Call operation: PublicIPPrefixes_Get") + _, err = publicIPPrefixesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.publicIpPrefixName, 
&armnetwork.PublicIPPrefixesClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step PublicIPPrefixes_UpdateTags + fmt.Println("Call operation: PublicIPPrefixes_UpdateTags") + _, err = publicIPPrefixesClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.publicIpPrefixName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step PublicIPPrefixes_Delete + fmt.Println("Call operation: PublicIPPrefixes_Delete") + publicIPPrefixesClientDeleteResponsePoller, err := publicIPPrefixesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.publicIpPrefixName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, publicIPPrefixesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/routetable_live_test.go b/sdk/resourcemanager/network/armnetwork/routetable_live_test.go new file mode 100644 index 000000000000..2546237dc326 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/routetable_live_test.go @@ -0,0 +1,165 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type RouteTableTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + routeTableName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *RouteTableTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.routeTableName = testutil.GenerateAlphaNumericID(testsuite.T(), "routetable", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *RouteTableTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestRouteTableTestSuite(t *testing.T) { + suite.Run(t, new(RouteTableTestSuite)) +} + 
+func (testsuite *RouteTableTestSuite) Prepare() { + var err error + // From step RouteTables_CreateOrUpdate + fmt.Println("Call operation: RouteTables_CreateOrUpdate") + routeTablesClient, err := armnetwork.NewRouteTablesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + routeTablesClientCreateOrUpdateResponsePoller, err := routeTablesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.routeTableName, armnetwork.RouteTable{ + Location: to.Ptr(testsuite.location), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, routeTablesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/routeTables/{routeTableName} +func (testsuite *RouteTableTestSuite) TestRouteTables() { + var err error + // From step RouteTables_ListAll + fmt.Println("Call operation: RouteTables_ListAll") + routeTablesClient, err := armnetwork.NewRouteTablesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + routeTablesClientNewListAllPager := routeTablesClient.NewListAllPager(nil) + for routeTablesClientNewListAllPager.More() { + _, err := routeTablesClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step RouteTables_List + fmt.Println("Call operation: RouteTables_List") + routeTablesClientNewListPager := routeTablesClient.NewListPager(testsuite.resourceGroupName, nil) + for routeTablesClientNewListPager.More() { + _, err := routeTablesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step RouteTables_Get + fmt.Println("Call operation: RouteTables_Get") + _, err = routeTablesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.routeTableName, &armnetwork.RouteTablesClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step RouteTables_UpdateTags + 
fmt.Println("Call operation: RouteTables_UpdateTags") + _, err = routeTablesClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.routeTableName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/routeTables/{routeTableName}/routes/{routeName} +func (testsuite *RouteTableTestSuite) TestRoutes() { + routeName := testutil.GenerateAlphaNumericID(testsuite.T(), "routename", 6) + var err error + // From step Routes_CreateOrUpdate + fmt.Println("Call operation: Routes_CreateOrUpdate") + routesClient, err := armnetwork.NewRoutesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + routesClientCreateOrUpdateResponsePoller, err := routesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.routeTableName, routeName, armnetwork.Route{ + Properties: &armnetwork.RoutePropertiesFormat{ + AddressPrefix: to.Ptr("10.0.3.0/24"), + NextHopType: to.Ptr(armnetwork.RouteNextHopTypeVirtualNetworkGateway), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, routesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step Routes_List + fmt.Println("Call operation: Routes_List") + routesClientNewListPager := routesClient.NewListPager(testsuite.resourceGroupName, testsuite.routeTableName, nil) + for routesClientNewListPager.More() { + _, err := routesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step Routes_Get + fmt.Println("Call operation: Routes_Get") + _, err = routesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.routeTableName, routeName, nil) + testsuite.Require().NoError(err) + + // From step Routes_Delete + fmt.Println("Call operation: Routes_Delete") + routesClientDeleteResponsePoller, err := 
routesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.routeTableName, routeName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, routesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *RouteTableTestSuite) Cleanup() { + var err error + // From step RouteTables_Delete + fmt.Println("Call operation: RouteTables_Delete") + routeTablesClient, err := armnetwork.NewRouteTablesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + routeTablesClientDeleteResponsePoller, err := routeTablesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.routeTableName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, routeTablesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/servicecommunity_live_test.go b/sdk/resourcemanager/network/armnetwork/servicecommunity_live_test.go new file mode 100644 index 000000000000..b5dc4ec8fee1 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/servicecommunity_live_test.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type ServiceCommunityTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ServiceCommunityTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *ServiceCommunityTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestServiceCommunityTestSuite(t *testing.T) { + suite.Run(t, new(ServiceCommunityTestSuite)) +} + +// Microsoft.Network/bgpServiceCommunities +func (testsuite *ServiceCommunityTestSuite) TestBgpServiceCommunities() { + var err error + // From step BgpServiceCommunities_List + 
fmt.Println("Call operation: BgpServiceCommunities_List") + bgpServiceCommunitiesClient, err := armnetwork.NewBgpServiceCommunitiesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + bgpServiceCommunitiesClientNewListPager := bgpServiceCommunitiesClient.NewListPager(nil) + for bgpServiceCommunitiesClientNewListPager.More() { + _, err := bgpServiceCommunitiesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/network/armnetwork/serviceendpointpolicy_live_test.go b/sdk/resourcemanager/network/armnetwork/serviceendpointpolicy_live_test.go new file mode 100644 index 000000000000..18027d942a6f --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/serviceendpointpolicy_live_test.go @@ -0,0 +1,167 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type ServiceEndpointPolicyTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + serviceEndpointPolicyName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ServiceEndpointPolicyTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.serviceEndpointPolicyName = testutil.GenerateAlphaNumericID(testsuite.T(), "serviceend", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *ServiceEndpointPolicyTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func 
TestServiceEndpointPolicyTestSuite(t *testing.T) { + suite.Run(t, new(ServiceEndpointPolicyTestSuite)) +} + +func (testsuite *ServiceEndpointPolicyTestSuite) Prepare() { + var err error + // From step ServiceEndpointPolicies_CreateOrUpdate + fmt.Println("Call operation: ServiceEndpointPolicies_CreateOrUpdate") + serviceEndpointPoliciesClient, err := armnetwork.NewServiceEndpointPoliciesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serviceEndpointPoliciesClientCreateOrUpdateResponsePoller, err := serviceEndpointPoliciesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.serviceEndpointPolicyName, armnetwork.ServiceEndpointPolicy{ + Location: to.Ptr(testsuite.location), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serviceEndpointPoliciesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName} +func (testsuite *ServiceEndpointPolicyTestSuite) TestServiceEndpointPolicies() { + var err error + // From step ServiceEndpointPolicies_List + fmt.Println("Call operation: ServiceEndpointPolicies_List") + serviceEndpointPoliciesClient, err := armnetwork.NewServiceEndpointPoliciesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serviceEndpointPoliciesClientNewListPager := serviceEndpointPoliciesClient.NewListPager(nil) + for serviceEndpointPoliciesClientNewListPager.More() { + _, err := serviceEndpointPoliciesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ServiceEndpointPolicies_ListByResourceGroup + fmt.Println("Call operation: ServiceEndpointPolicies_ListByResourceGroup") + serviceEndpointPoliciesClientNewListByResourceGroupPager := serviceEndpointPoliciesClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for 
serviceEndpointPoliciesClientNewListByResourceGroupPager.More() { + _, err := serviceEndpointPoliciesClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ServiceEndpointPolicies_Get + fmt.Println("Call operation: ServiceEndpointPolicies_Get") + _, err = serviceEndpointPoliciesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.serviceEndpointPolicyName, &armnetwork.ServiceEndpointPoliciesClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step ServiceEndpointPolicies_UpdateTags + fmt.Println("Call operation: ServiceEndpointPolicies_UpdateTags") + _, err = serviceEndpointPoliciesClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.serviceEndpointPolicyName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}/serviceEndpointPolicyDefinitions/{serviceEndpointPolicyDefinitionName} +func (testsuite *ServiceEndpointPolicyTestSuite) TestServiceEndpointPolicyDefinitions() { + serviceEndpointPolicyDefinitionName := testutil.GenerateAlphaNumericID(testsuite.T(), "serviceend", 6) + var err error + // From step ServiceEndpointPolicyDefinitions_CreateOrUpdate + fmt.Println("Call operation: ServiceEndpointPolicyDefinitions_CreateOrUpdate") + serviceEndpointPolicyDefinitionsClient, err := armnetwork.NewServiceEndpointPolicyDefinitionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serviceEndpointPolicyDefinitionsClientCreateOrUpdateResponsePoller, err := serviceEndpointPolicyDefinitionsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName, armnetwork.ServiceEndpointPolicyDefinition{ + Properties: 
&armnetwork.ServiceEndpointPolicyDefinitionPropertiesFormat{ + Description: to.Ptr("Storage Service EndpointPolicy Definition"), + Service: to.Ptr("Microsoft.Storage"), + ServiceResources: []*string{ + to.Ptr("/subscriptions/" + testsuite.subscriptionId)}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serviceEndpointPolicyDefinitionsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step ServiceEndpointPolicyDefinitions_ListByResourceGroup + fmt.Println("Call operation: ServiceEndpointPolicyDefinitions_ListByResourceGroup") + serviceEndpointPolicyDefinitionsClientNewListByResourceGroupPager := serviceEndpointPolicyDefinitionsClient.NewListByResourceGroupPager(testsuite.resourceGroupName, testsuite.serviceEndpointPolicyName, nil) + for serviceEndpointPolicyDefinitionsClientNewListByResourceGroupPager.More() { + _, err := serviceEndpointPolicyDefinitionsClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step ServiceEndpointPolicyDefinitions_Get + fmt.Println("Call operation: ServiceEndpointPolicyDefinitions_Get") + _, err = serviceEndpointPolicyDefinitionsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName, nil) + testsuite.Require().NoError(err) + + // From step ServiceEndpointPolicyDefinitions_Delete + fmt.Println("Call operation: ServiceEndpointPolicyDefinitions_Delete") + serviceEndpointPolicyDefinitionsClientDeleteResponsePoller, err := serviceEndpointPolicyDefinitionsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.serviceEndpointPolicyName, serviceEndpointPolicyDefinitionName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serviceEndpointPolicyDefinitionsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *ServiceEndpointPolicyTestSuite) 
Cleanup() { + var err error + // From step ServiceEndpointPolicies_Delete + fmt.Println("Call operation: ServiceEndpointPolicies_Delete") + serviceEndpointPoliciesClient, err := armnetwork.NewServiceEndpointPoliciesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serviceEndpointPoliciesClientDeleteResponsePoller, err := serviceEndpointPoliciesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.serviceEndpointPolicyName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serviceEndpointPoliciesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/servicetags_live_test.go b/sdk/resourcemanager/network/armnetwork/servicetags_live_test.go new file mode 100644 index 000000000000..ab9d8a3a16b8 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/servicetags_live_test.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type ServiceTagsTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ServiceTagsTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *ServiceTagsTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestServiceTagsTestSuite(t *testing.T) { + suite.Run(t, new(ServiceTagsTestSuite)) +} + +// Microsoft.Network/locations/{location}/serviceTags +func (testsuite *ServiceTagsTestSuite) TestServiceTags() { + var err error + // From step ServiceTags_List + fmt.Println("Call operation: ServiceTags_List") + 
serviceTagsClient, err := armnetwork.NewServiceTagsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = serviceTagsClient.List(testsuite.ctx, testsuite.location, nil) + testsuite.Require().NoError(err) + + // From step ServiceTagInformation_List + fmt.Println("Call operation: ServiceTagInformation_List") + serviceTagInformationClient, err := armnetwork.NewServiceTagInformationClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serviceTagInformationClientNewListPager := serviceTagInformationClient.NewListPager(testsuite.location, &armnetwork.ServiceTagInformationClientListOptions{NoAddressPrefixes: nil, + TagName: nil, + }) + for serviceTagInformationClientNewListPager.More() { + _, err := serviceTagInformationClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/network/armnetwork/subnets_client_live_test.go b/sdk/resourcemanager/network/armnetwork/subnets_client_live_test.go deleted file mode 100644 index 76fa83c5f6d6..000000000000 --- a/sdk/resourcemanager/network/armnetwork/subnets_client_live_test.go +++ /dev/null @@ -1,122 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package armnetwork_test - -import ( - "context" - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" - "github.com/stretchr/testify/suite" -) - -type SubnetsClientTestSuite struct { - suite.Suite - - ctx context.Context - cred azcore.TokenCredential - options *arm.ClientOptions - location string - resourceGroupName string - subscriptionID string -} - -func (testsuite *SubnetsClientTestSuite) SetupSuite() { - testsuite.ctx = context.Background() - testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) - testsuite.location = testutil.GetEnv("LOCATION", "eastus") - testsuite.subscriptionID = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") - resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.location) - testsuite.Require().NoError(err) - testsuite.resourceGroupName = *resourceGroup.Name -} - -func (testsuite *SubnetsClientTestSuite) TearDownSuite() { - _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.resourceGroupName) - testsuite.Require().NoError(err) - testutil.StopRecording(testsuite.T()) -} - -func TestSubnetsClient(t *testing.T) { - suite.Run(t, new(SubnetsClientTestSuite)) -} - -func (testsuite *SubnetsClientTestSuite) TestSubnetsCRUD() { - // create virtual network - fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate") - vnClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionID, testsuite.cred, 
testsuite.options) - testsuite.Require().NoError(err) - vnName := "go-test-vn" - vnPoller, err := vnClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vnName, - armnetwork.VirtualNetwork{ - Location: to.Ptr(testsuite.location), - Properties: &armnetwork.VirtualNetworkPropertiesFormat{ - AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ - to.Ptr("10.1.0.0/16"), - }, - }, - }, - }, - nil, - ) - testsuite.Require().NoError(err) - vnResp, err := testutil.PollForTest(testsuite.ctx, vnPoller) - testsuite.Require().NoError(err) - testsuite.Require().Equal(vnName, *vnResp.Name) - - // create subnet - fmt.Println("Call operation: Subnets_CreateOrUpdate") - subClient, err := armnetwork.NewSubnetsClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - subName := "go-test-subnet" - subPoller, err := subClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vnName, - subName, - armnetwork.Subnet{ - Properties: &armnetwork.SubnetPropertiesFormat{ - AddressPrefix: to.Ptr("10.1.10.0/24"), - }, - }, - nil, - ) - testsuite.Require().NoError(err) - subResp, err := testutil.PollForTest(testsuite.ctx, subPoller) - testsuite.Require().NoError(err) - testsuite.Require().Equal(subName, *subResp.Name) - - // get subnet - fmt.Println("Call operation: Subnets_Get") - getResp, err := subClient.Get(testsuite.ctx, testsuite.resourceGroupName, vnName, subName, nil) - testsuite.Require().NoError(err) - testsuite.Require().Equal(subName, *getResp.Name) - - // list subnet - fmt.Println("Call operation: Subnets_List") - listPager := subClient.NewListPager(testsuite.resourceGroupName, vnName, nil) - testsuite.Require().Equal(true, listPager.More()) - - // delete subnet - fmt.Println("Call operation: Subnets_Delete") - delPoller, err := subClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, vnName, subName, nil) - testsuite.Require().NoError(err) - delResp, err := 
testutil.PollForTest(testsuite.ctx, delPoller) - testsuite.Require().NoError(err) - //testsuite.Require().Equal(200, delResp.RawResponse.StatusCode) - _ = delResp -} diff --git a/sdk/resourcemanager/network/armnetwork/usage_live_test.go b/sdk/resourcemanager/network/armnetwork/usage_live_test.go new file mode 100644 index 000000000000..f992f35161c5 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/usage_live_test.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type UsageTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *UsageTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := 
testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *UsageTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestUsageTestSuite(t *testing.T) { + suite.Run(t, new(UsageTestSuite)) +} + +// Microsoft.Network/locations/{location}/usages +func (testsuite *UsageTestSuite) TestUsages() { + var err error + // From step Usages_List + fmt.Println("Call operation: Usages_List") + usagesClient, err := armnetwork.NewUsagesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + usagesClientNewListPager := usagesClient.NewListPager(testsuite.location, nil) + for usagesClientNewListPager.More() { + _, err := usagesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} diff --git a/sdk/resourcemanager/network/armnetwork/virtualnetwork_live_test.go b/sdk/resourcemanager/network/armnetwork/virtualnetwork_live_test.go new file mode 100644 index 000000000000..3b7ed45a478c --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/virtualnetwork_live_test.go @@ -0,0 +1,258 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type VirtualNetworkTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + virtualNetworkName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *VirtualNetworkTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.virtualNetworkName = testutil.GenerateAlphaNumericID(testsuite.T(), "virtualnet", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *VirtualNetworkTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestVirtualNetworkTestSuite(t *testing.T) { + suite.Run(t, 
new(VirtualNetworkTestSuite)) +} + +func (testsuite *VirtualNetworkTestSuite) Prepare() { + var err error + // From step VirtualNetworks_CreateOrUpdate + fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate") + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworksClientCreateOrUpdateResponsePoller, err := virtualNetworksClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, armnetwork.VirtualNetwork{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("10.0.0.0/16")}, + }, + FlowTimeoutInMinutes: to.Ptr[int32](10), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualNetworksClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/virtualNetworks/{virtualNetworkName} +func (testsuite *VirtualNetworkTestSuite) TestVirtualNetworks() { + var err error + // From step VirtualNetworks_ListAll + fmt.Println("Call operation: VirtualNetworks_ListAll") + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworksClientNewListAllPager := virtualNetworksClient.NewListAllPager(nil) + for virtualNetworksClientNewListAllPager.More() { + _, err := virtualNetworksClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualNetworks_List + fmt.Println("Call operation: VirtualNetworks_List") + virtualNetworksClientNewListPager := virtualNetworksClient.NewListPager(testsuite.resourceGroupName, nil) + for virtualNetworksClientNewListPager.More() { + _, err := virtualNetworksClientNewListPager.NextPage(testsuite.ctx) + 
testsuite.Require().NoError(err)
+		break
+	}
+
+	// From step VirtualNetworks_Get
+	fmt.Println("Call operation: VirtualNetworks_Get")
+	_, err = virtualNetworksClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, &armnetwork.VirtualNetworksClientGetOptions{Expand: nil})
+	testsuite.Require().NoError(err)
+
+	// From step VirtualNetworks_ListUsage
+	fmt.Println("Call operation: VirtualNetworks_ListUsage")
+	virtualNetworksClientNewListUsagePager := virtualNetworksClient.NewListUsagePager(testsuite.resourceGroupName, testsuite.virtualNetworkName, nil)
+	for virtualNetworksClientNewListUsagePager.More() {
+		_, err := virtualNetworksClientNewListUsagePager.NextPage(testsuite.ctx)
+		testsuite.Require().NoError(err)
+		break
+	}
+
+	// From step VirtualNetworks_UpdateTags
+	fmt.Println("Call operation: VirtualNetworks_UpdateTags")
+	_, err = virtualNetworksClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, armnetwork.TagsObject{
+		Tags: map[string]*string{
+			"tag1": to.Ptr("value1"),
+			"tag2": to.Ptr("value2"),
+		},
+	}, nil)
+	testsuite.Require().NoError(err)
+
+	// From step VirtualNetworks_ListDdosProtectionStatus
+	fmt.Println("Call operation: VirtualNetworks_ListDdosProtectionStatus")
+	virtualNetworksClientListDdosProtectionStatusResponsePoller, err := virtualNetworksClient.BeginListDdosProtectionStatus(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, &armnetwork.VirtualNetworksClientBeginListDdosProtectionStatusOptions{Top: to.Ptr[int32](75),
+		SkipToken: nil,
+	})
+	testsuite.Require().NoError(err)
+	virtualNetworksClientListDdosProtectionStatusResponse, err := testutil.PollForTest(testsuite.ctx, virtualNetworksClientListDdosProtectionStatusResponsePoller)
+	testsuite.Require().NoError(err)
+	for (*virtualNetworksClientListDdosProtectionStatusResponse).More() {
+		_, err := (*virtualNetworksClientListDdosProtectionStatusResponse).NextPage(testsuite.ctx)
+	
testsuite.Require().NoError(err) + break + } +} + +// Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName} +func (testsuite *VirtualNetworkTestSuite) TestSubnets() { + subnetName := testutil.GenerateAlphaNumericID(testsuite.T(), "subnetname", 6) + var err error + // From step Subnets_CreateOrUpdate + fmt.Println("Call operation: Subnets_CreateOrUpdate") + subnetsClient, err := armnetwork.NewSubnetsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + subnetsClientCreateOrUpdateResponsePoller, err := subnetsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, subnetName, armnetwork.Subnet{ + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr("10.0.0.0/16"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, subnetsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step Subnets_List + fmt.Println("Call operation: Subnets_List") + subnetsClientNewListPager := subnetsClient.NewListPager(testsuite.resourceGroupName, testsuite.virtualNetworkName, nil) + for subnetsClientNewListPager.More() { + _, err := subnetsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step Subnets_Get + fmt.Println("Call operation: Subnets_Get") + _, err = subnetsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, subnetName, &armnetwork.SubnetsClientGetOptions{Expand: nil}) + testsuite.Require().NoError(err) + + // From step Subnets_Delete + fmt.Println("Call operation: Subnets_Delete") + subnetsClientDeleteResponsePoller, err := subnetsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, subnetName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, subnetsClientDeleteResponsePoller) + testsuite.Require().NoError(err) 
+} + +// Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName} +func (testsuite *VirtualNetworkTestSuite) TestVirtualNetworkPeerings() { + virtualNetworkPeeringName := testutil.GenerateAlphaNumericID(testsuite.T(), "virtualnet", 6) + var err error + // From step VirtualNetworks_CreateOrUpdate + fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate") + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworksClientCreateOrUpdateResponsePoller, err := virtualNetworksClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, virtualNetworkPeeringName, armnetwork.VirtualNetwork{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("11.0.0.0/16")}, + }, + FlowTimeoutInMinutes: to.Ptr[int32](10), + }, + }, nil) + testsuite.Require().NoError(err) + virtualNetworksClientCreateOrUpdateResponse, err := testutil.PollForTest(testsuite.ctx, virtualNetworksClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + virtualNetworkSecondId := *virtualNetworksClientCreateOrUpdateResponse.ID + + // From step VirtualNetworkPeerings_CreateOrUpdate + fmt.Println("Call operation: VirtualNetworkPeerings_CreateOrUpdate") + virtualNetworkPeeringsClient, err := armnetwork.NewVirtualNetworkPeeringsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworkPeeringsClientCreateOrUpdateResponsePoller, err := virtualNetworkPeeringsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, virtualNetworkPeeringName, armnetwork.VirtualNetworkPeering{ + Properties: &armnetwork.VirtualNetworkPeeringPropertiesFormat{ + AllowForwardedTraffic: to.Ptr(true), + 
AllowGatewayTransit: to.Ptr(false), + AllowVirtualNetworkAccess: to.Ptr(true), + RemoteVirtualNetwork: &armnetwork.SubResource{ + ID: to.Ptr(virtualNetworkSecondId), + }, + UseRemoteGateways: to.Ptr(false), + }, + }, &armnetwork.VirtualNetworkPeeringsClientBeginCreateOrUpdateOptions{SyncRemoteAddressSpace: nil}) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualNetworkPeeringsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualNetworkPeerings_List + fmt.Println("Call operation: VirtualNetworkPeerings_List") + virtualNetworkPeeringsClientNewListPager := virtualNetworkPeeringsClient.NewListPager(testsuite.resourceGroupName, testsuite.virtualNetworkName, nil) + for virtualNetworkPeeringsClientNewListPager.More() { + _, err := virtualNetworkPeeringsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualNetworkPeerings_Get + fmt.Println("Call operation: VirtualNetworkPeerings_Get") + _, err = virtualNetworkPeeringsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, virtualNetworkPeeringName, nil) + testsuite.Require().NoError(err) + + // From step VirtualNetworkPeerings_Delete + fmt.Println("Call operation: VirtualNetworkPeerings_Delete") + virtualNetworkPeeringsClientDeleteResponsePoller, err := virtualNetworkPeeringsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, virtualNetworkPeeringName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualNetworkPeeringsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *VirtualNetworkTestSuite) Cleanup() { + var err error + // From step VirtualNetworks_Delete + fmt.Println("Call operation: VirtualNetworks_Delete") + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + 
testsuite.Require().NoError(err) + virtualNetworksClientDeleteResponsePoller, err := virtualNetworksClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualNetworksClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/virtualnetworkgateway_live_test.go b/sdk/resourcemanager/network/armnetwork/virtualnetworkgateway_live_test.go new file mode 100644 index 000000000000..24d9d51edc26 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/virtualnetworkgateway_live_test.go @@ -0,0 +1,237 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type VirtualNetworkGatewayTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + publicIpAddressId string + publicIpAddressName string + subnetId string + virtualNetworkGatewayName string + virtualNetworkName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *VirtualNetworkGatewayTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.publicIpAddressName = testutil.GenerateAlphaNumericID(testsuite.T(), "publicipadgateway", 6) + testsuite.virtualNetworkGatewayName = testutil.GenerateAlphaNumericID(testsuite.T(), "virtualnetgateway", 6) + testsuite.virtualNetworkName = testutil.GenerateAlphaNumericID(testsuite.T(), "virtualnetgateway", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite 
*VirtualNetworkGatewayTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestVirtualNetworkGatewayTestSuite(t *testing.T) { + suite.Run(t, new(VirtualNetworkGatewayTestSuite)) +} + +func (testsuite *VirtualNetworkGatewayTestSuite) Prepare() { + var err error + // From step VirtualNetworks_CreateOrUpdate + fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate") + virtualNetworksClient, err := armnetwork.NewVirtualNetworksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworksClientCreateOrUpdateResponsePoller, err := virtualNetworksClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkName, armnetwork.VirtualNetwork{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("10.0.0.0/16")}, + }, + Subnets: []*armnetwork.Subnet{ + { + Name: to.Ptr("GatewaySubnet"), + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr("10.0.0.0/24"), + }, + }}, + }, + }, nil) + testsuite.Require().NoError(err) + var virtualNetworksClientCreateOrUpdateResponse *armnetwork.VirtualNetworksClientCreateOrUpdateResponse + virtualNetworksClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, virtualNetworksClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.subnetId = *virtualNetworksClientCreateOrUpdateResponse.Properties.Subnets[0].ID + + // From step PublicIPAddresses_CreateOrUpdate + fmt.Println("Call operation: PublicIPAddresses_CreateOrUpdate") + publicIPAddressesClient, err := armnetwork.NewPublicIPAddressesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + 
testsuite.Require().NoError(err) + publicIPAddressesClientCreateOrUpdateResponsePoller, err := publicIPAddressesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.publicIpAddressName, armnetwork.PublicIPAddress{ + Location: to.Ptr(testsuite.location), + }, nil) + testsuite.Require().NoError(err) + var publicIPAddressesClientCreateOrUpdateResponse *armnetwork.PublicIPAddressesClientCreateOrUpdateResponse + publicIPAddressesClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, publicIPAddressesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.publicIpAddressId = *publicIPAddressesClientCreateOrUpdateResponse.ID + + // From step VirtualNetworkGateways_CreateOrUpdate + fmt.Println("Call operation: VirtualNetworkGateways_CreateOrUpdate") + virtualNetworkGatewaysClient, err := armnetwork.NewVirtualNetworkGatewaysClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworkGatewaysClientCreateOrUpdateResponsePoller, err := virtualNetworkGatewaysClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkGatewayName, armnetwork.VirtualNetworkGateway{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.VirtualNetworkGatewayPropertiesFormat{ + GatewayType: to.Ptr(armnetwork.VirtualNetworkGatewayTypeVPN), + IPConfigurations: []*armnetwork.VirtualNetworkGatewayIPConfiguration{ + { + Name: to.Ptr("gwipconfig1"), + Properties: &armnetwork.VirtualNetworkGatewayIPConfigurationPropertiesFormat{ + PrivateIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodDynamic), + PublicIPAddress: &armnetwork.SubResource{ + ID: to.Ptr(testsuite.publicIpAddressId), + }, + Subnet: &armnetwork.SubResource{ + ID: to.Ptr(testsuite.subnetId), + }, + }, + }}, + VPNType: to.Ptr(armnetwork.VPNTypeRouteBased), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, 
virtualNetworkGatewaysClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName} +func (testsuite *VirtualNetworkGatewayTestSuite) TestVirtualNetworkGateways() { + var err error + // From step VirtualNetworkGateways_List + fmt.Println("Call operation: VirtualNetworkGateways_List") + virtualNetworkGatewaysClient, err := armnetwork.NewVirtualNetworkGatewaysClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualNetworkGatewaysClientNewListPager := virtualNetworkGatewaysClient.NewListPager(testsuite.resourceGroupName, nil) + for virtualNetworkGatewaysClientNewListPager.More() { + _, err := virtualNetworkGatewaysClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualNetworkGateways_Get + fmt.Println("Call operation: VirtualNetworkGateways_Get") + _, err = virtualNetworkGatewaysClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkGatewayName, nil) + testsuite.Require().NoError(err) + + // From step VirtualNetworkGateways_ListConnections + fmt.Println("Call operation: VirtualNetworkGateways_ListConnections") + virtualNetworkGatewaysClientNewListConnectionsPager := virtualNetworkGatewaysClient.NewListConnectionsPager(testsuite.resourceGroupName, testsuite.virtualNetworkGatewayName, nil) + for virtualNetworkGatewaysClientNewListConnectionsPager.More() { + _, err := virtualNetworkGatewaysClientNewListConnectionsPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualNetworkGateways_UpdateTags + fmt.Println("Call operation: VirtualNetworkGateways_UpdateTags") + virtualNetworkGatewaysClientUpdateTagsResponsePoller, err := virtualNetworkGatewaysClient.BeginUpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkGatewayName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": 
to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualNetworkGatewaysClientUpdateTagsResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualNetworkGateways_Delete + fmt.Println("Call operation: VirtualNetworkGateways_Delete") + virtualNetworkGatewaysClientDeleteResponsePoller, err := virtualNetworkGatewaysClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualNetworkGatewayName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualNetworkGatewaysClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/localNetworkGateways/{localNetworkGatewayName} +func (testsuite *VirtualNetworkGatewayTestSuite) TestLocalNetworkGateways() { + localNetworkGatewayName := testutil.GenerateAlphaNumericID(testsuite.T(), "localnetwo", 6) + var err error + // From step LocalNetworkGateways_CreateOrUpdate + fmt.Println("Call operation: LocalNetworkGateways_CreateOrUpdate") + localNetworkGatewaysClient, err := armnetwork.NewLocalNetworkGatewaysClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + localNetworkGatewaysClientCreateOrUpdateResponsePoller, err := localNetworkGatewaysClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, localNetworkGatewayName, armnetwork.LocalNetworkGateway{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.LocalNetworkGatewayPropertiesFormat{ + GatewayIPAddress: to.Ptr("11.12.13.14"), + LocalNetworkAddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("10.1.0.0/16")}, + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, localNetworkGatewaysClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step LocalNetworkGateways_List + fmt.Println("Call operation: 
LocalNetworkGateways_List") + localNetworkGatewaysClientNewListPager := localNetworkGatewaysClient.NewListPager(testsuite.resourceGroupName, nil) + for localNetworkGatewaysClientNewListPager.More() { + _, err := localNetworkGatewaysClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step LocalNetworkGateways_Get + fmt.Println("Call operation: LocalNetworkGateways_Get") + _, err = localNetworkGatewaysClient.Get(testsuite.ctx, testsuite.resourceGroupName, localNetworkGatewayName, nil) + testsuite.Require().NoError(err) + + // From step LocalNetworkGateways_UpdateTags + fmt.Println("Call operation: LocalNetworkGateways_UpdateTags") + _, err = localNetworkGatewaysClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, localNetworkGatewayName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "tag1": to.Ptr("value1"), + "tag2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step LocalNetworkGateways_Delete + fmt.Println("Call operation: LocalNetworkGateways_Delete") + localNetworkGatewaysClientDeleteResponsePoller, err := localNetworkGatewaysClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, localNetworkGatewayName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, localNetworkGatewaysClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/virtualnetworks_client_live_test.go b/sdk/resourcemanager/network/armnetwork/virtualnetworks_client_live_test.go deleted file mode 100644 index 178f7cd211ce..000000000000 --- a/sdk/resourcemanager/network/armnetwork/virtualnetworks_client_live_test.go +++ /dev/null @@ -1,117 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. See License.txt in the project root for license information. 
- -package armnetwork_test - -import ( - "context" - "fmt" - "testing" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" - "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" - "github.com/stretchr/testify/suite" -) - -type VirtualNetworksClientTestSuite struct { - suite.Suite - - ctx context.Context - cred azcore.TokenCredential - options *arm.ClientOptions - location string - resourceGroupName string - subscriptionID string -} - -func (testsuite *VirtualNetworksClientTestSuite) SetupSuite() { - testsuite.ctx = context.Background() - testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) - testsuite.location = testutil.GetEnv("LOCATION", "eastus") - testsuite.subscriptionID = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") - testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") - resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.location) - testsuite.Require().NoError(err) - testsuite.resourceGroupName = *resourceGroup.Name -} - -func (testsuite *VirtualNetworksClientTestSuite) TearDownSuite() { - _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionID, testsuite.cred, testsuite.options, testsuite.resourceGroupName) - testsuite.Require().NoError(err) - testutil.StopRecording(testsuite.T()) -} - -func TestVirtualNetworksClient(t *testing.T) { - suite.Run(t, new(VirtualNetworksClientTestSuite)) -} - -func (testsuite *VirtualNetworksClientTestSuite) TestVirtualMachineCRUD() { - // create virtual network - fmt.Println("Call operation: VirtualNetworks_CreateOrUpdate") - vnClient, err := 
armnetwork.NewVirtualNetworksClient(testsuite.subscriptionID, testsuite.cred, testsuite.options) - testsuite.Require().NoError(err) - vnName := "go-test-vn" - vnPoller, err := vnClient.BeginCreateOrUpdate( - testsuite.ctx, - testsuite.resourceGroupName, - vnName, - armnetwork.VirtualNetwork{ - Location: to.Ptr(testsuite.location), - Properties: &armnetwork.VirtualNetworkPropertiesFormat{ - AddressSpace: &armnetwork.AddressSpace{ - AddressPrefixes: []*string{ - to.Ptr("10.1.0.0/16"), - }, - }, - }, - }, - nil, - ) - testsuite.Require().NoError(err) - vnResp, err := testutil.PollForTest(testsuite.ctx, vnPoller) - testsuite.Require().NoError(err) - testsuite.Require().Equal(vnName, *vnResp.Name) - - //virtual network update tags - fmt.Println("Call operation: VirtualNetworks_UpdateTags") - tagResp, err := vnClient.UpdateTags( - testsuite.ctx, - testsuite.resourceGroupName, - vnName, - armnetwork.TagsObject{ - Tags: map[string]*string{ - "tag1": to.Ptr("value1"), - "tag2": to.Ptr("value2"), - }, - }, - nil, - ) - testsuite.Require().NoError(err) - testsuite.Require().Equal("value1", *tagResp.Tags["tag1"]) - - // get virtual network - fmt.Println("Call operation: VirtualNetworks_Get") - vnResp2, err := vnClient.Get(testsuite.ctx, testsuite.resourceGroupName, vnName, nil) - testsuite.Require().NoError(err) - testsuite.Require().Equal(vnName, *vnResp2.Name) - - //virtual network list - fmt.Println("Call operation: VirtualNetworks_List") - listPager := vnClient.NewListPager(testsuite.resourceGroupName, nil) - testsuite.Require().Equal(true, listPager.More()) - - //virtual network delete - fmt.Println("Call operation: VirtualNetworks_Delete") - delPoller, err := vnClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, vnName, nil) - testsuite.Require().NoError(err) - delResp, err := testutil.PollForTest(testsuite.ctx, delPoller) - testsuite.Require().NoError(err) - //testsuite.Require().Equal(200, delResp.RawResponse.StatusCode) - _ = delResp -} diff --git 
a/sdk/resourcemanager/network/armnetwork/virtualwan_live_test.go b/sdk/resourcemanager/network/armnetwork/virtualwan_live_test.go new file mode 100644 index 000000000000..717a0ae720bb --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/virtualwan_live_test.go @@ -0,0 +1,461 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type VirtualWanTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + gatewayName string + virtualHubId string + virtualHubName string + virtualWANName string + virtualWanId string + vpnServerConfigurationName string + vpnSiteName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *VirtualWanTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.gatewayName = testutil.GenerateAlphaNumericID(testsuite.T(), "gatewaynam", 6) + testsuite.virtualHubName = testutil.GenerateAlphaNumericID(testsuite.T(), "virtualhub", 6) + testsuite.virtualWANName = 
testutil.GenerateAlphaNumericID(testsuite.T(), "virtualwan", 6) + testsuite.vpnServerConfigurationName = testutil.GenerateAlphaNumericID(testsuite.T(), "vpnserverc", 6) + testsuite.vpnSiteName = testutil.GenerateAlphaNumericID(testsuite.T(), "vpnsitenam", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *VirtualWanTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestVirtualWanTestSuite(t *testing.T) { + suite.Run(t, new(VirtualWanTestSuite)) +} + +func (testsuite *VirtualWanTestSuite) Prepare() { + var err error + // From step VirtualWans_CreateOrUpdate + fmt.Println("Call operation: VirtualWans_CreateOrUpdate") + virtualWansClient, err := armnetwork.NewVirtualWansClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualWansClientCreateOrUpdateResponsePoller, err := virtualWansClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, "wan1", armnetwork.VirtualWAN{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{ + "key1": to.Ptr("value1"), + }, + Properties: &armnetwork.VirtualWanProperties{ + Type: to.Ptr("Standard"), + DisableVPNEncryption: to.Ptr(false), + }, + }, nil) + testsuite.Require().NoError(err) + var virtualWansClientCreateOrUpdateResponse 
*armnetwork.VirtualWansClientCreateOrUpdateResponse + virtualWansClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, virtualWansClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.virtualWanId = *virtualWansClientCreateOrUpdateResponse.ID + + // From step VpnSites_CreateOrUpdate + fmt.Println("Call operation: VPNSites_CreateOrUpdate") + vPNSitesClient, err := armnetwork.NewVPNSitesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + vPNSitesClientCreateOrUpdateResponsePoller, err := vPNSitesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.vpnSiteName, armnetwork.VPNSite{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{ + "key1": to.Ptr("value1"), + }, + Properties: &armnetwork.VPNSiteProperties{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr("10.0.0.0/16")}, + }, + IsSecuritySite: to.Ptr(false), + O365Policy: &armnetwork.O365PolicyProperties{ + BreakOutCategories: &armnetwork.O365BreakOutCategoryPolicies{ + Default: to.Ptr(false), + Allow: to.Ptr(true), + Optimize: to.Ptr(true), + }, + }, + VirtualWan: &armnetwork.SubResource{ + ID: to.Ptr(testsuite.virtualWanId), + }, + VPNSiteLinks: []*armnetwork.VPNSiteLink{ + { + Name: to.Ptr("vpnSiteLink1"), + Properties: &armnetwork.VPNSiteLinkProperties{ + BgpProperties: &armnetwork.VPNLinkBgpSettings{ + Asn: to.Ptr[int64](1234), + BgpPeeringAddress: to.Ptr("192.168.0.0"), + }, + IPAddress: to.Ptr("50.50.50.56"), + LinkProperties: &armnetwork.VPNLinkProviderProperties{ + LinkProviderName: to.Ptr("vendor1"), + LinkSpeedInMbps: to.Ptr[int32](0), + }, + }, + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, vPNSitesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualHubs_CreateOrUpdate + fmt.Println("Call operation: VirtualHubs_CreateOrUpdate") + 
virtualHubsClient, err := armnetwork.NewVirtualHubsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualHubsClientCreateOrUpdateResponsePoller, err := virtualHubsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, armnetwork.VirtualHub{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{ + "key1": to.Ptr("value1"), + }, + Properties: &armnetwork.VirtualHubProperties{ + AddressPrefix: to.Ptr("10.168.0.0/24"), + SKU: to.Ptr("Standard"), + VirtualWan: &armnetwork.SubResource{ + ID: to.Ptr(testsuite.virtualWanId), + }, + }, + }, nil) + testsuite.Require().NoError(err) + var virtualHubsClientCreateOrUpdateResponse *armnetwork.VirtualHubsClientCreateOrUpdateResponse + virtualHubsClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, virtualHubsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + testsuite.virtualHubId = *virtualHubsClientCreateOrUpdateResponse.ID +} + +// Microsoft.Network/virtualWans/{VirtualWANName} +func (testsuite *VirtualWanTestSuite) TestVirtualWans() { + var err error + // From step VirtualWans_List + fmt.Println("Call operation: VirtualWans_List") + virtualWansClient, err := armnetwork.NewVirtualWansClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualWansClientNewListPager := virtualWansClient.NewListPager(nil) + for virtualWansClientNewListPager.More() { + _, err := virtualWansClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualWans_ListByResourceGroup + fmt.Println("Call operation: VirtualWans_ListByResourceGroup") + virtualWansClientNewListByResourceGroupPager := virtualWansClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for virtualWansClientNewListByResourceGroupPager.More() { + _, err := 
virtualWansClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualWans_Get + fmt.Println("Call operation: VirtualWans_Get") + _, err = virtualWansClient.Get(testsuite.ctx, testsuite.resourceGroupName, "wan1", nil) + testsuite.Require().NoError(err) + + // From step VirtualWans_UpdateTags + fmt.Println("Call operation: VirtualWans_UpdateTags") + _, err = virtualWansClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, "wan1", armnetwork.TagsObject{ + Tags: map[string]*string{ + "key1": to.Ptr("value1"), + "key2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/vpnSites/{vpnSiteName} +func (testsuite *VirtualWanTestSuite) TestVpnSites() { + var vpnSiteLinkName string + var err error + // From step VpnSites_List + fmt.Println("Call operation: VPNSites_List") + vPNSitesClient, err := armnetwork.NewVPNSitesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + vPNSitesClientNewListPager := vPNSitesClient.NewListPager(nil) + for vPNSitesClientNewListPager.More() { + _, err := vPNSitesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VpnSites_ListByResourceGroup + fmt.Println("Call operation: VPNSites_ListByResourceGroup") + vPNSitesClientNewListByResourceGroupPager := vPNSitesClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for vPNSitesClientNewListByResourceGroupPager.More() { + _, err := vPNSitesClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VpnSites_Get + fmt.Println("Call operation: VPNSites_Get") + _, err = vPNSitesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.vpnSiteName, nil) + testsuite.Require().NoError(err) + + // From step VpnSites_UpdateTags + fmt.Println("Call operation: VPNSites_UpdateTags") + _, err = 
vPNSitesClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.vpnSiteName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "key1": to.Ptr("value1"), + "key2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) + + // From step VpnSiteLinks_ListByVpnSite + fmt.Println("Call operation: VPNSiteLinks_ListByVPNSite") + vPNSiteLinksClient, err := armnetwork.NewVPNSiteLinksClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + vPNSiteLinksClientNewListByVPNSitePager := vPNSiteLinksClient.NewListByVPNSitePager(testsuite.resourceGroupName, testsuite.vpnSiteName, nil) + for vPNSiteLinksClientNewListByVPNSitePager.More() { + nextResult, err := vPNSiteLinksClientNewListByVPNSitePager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + + vpnSiteLinkName = *nextResult.Value[0].Name + break + } + + // From step VpnSiteLinks_Get + fmt.Println("Call operation: VPNSiteLinks_Get") + _, err = vPNSiteLinksClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.vpnSiteName, vpnSiteLinkName, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/virtualHubs/{virtualHubName} +func (testsuite *VirtualWanTestSuite) TestVirtualHubs() { + var err error + // From step VirtualHubs_List + fmt.Println("Call operation: VirtualHubs_List") + virtualHubsClient, err := armnetwork.NewVirtualHubsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualHubsClientNewListPager := virtualHubsClient.NewListPager(nil) + for virtualHubsClientNewListPager.More() { + _, err := virtualHubsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualHubs_ListByResourceGroup + fmt.Println("Call operation: VirtualHubs_ListByResourceGroup") + virtualHubsClientNewListByResourceGroupPager := virtualHubsClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) + for 
virtualHubsClientNewListByResourceGroupPager.More() { + _, err := virtualHubsClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step VirtualHubs_Get + fmt.Println("Call operation: VirtualHubs_Get") + _, err = virtualHubsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, nil) + testsuite.Require().NoError(err) + + // From step VirtualHubs_UpdateTags + fmt.Println("Call operation: VirtualHubs_UpdateTags") + _, err = virtualHubsClient.UpdateTags(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, armnetwork.TagsObject{ + Tags: map[string]*string{ + "key1": to.Ptr("value1"), + "key2": to.Ptr("value2"), + }, + }, nil) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/virtualHubs/{virtualHubName}/routeMaps/{routeMapName} +func (testsuite *VirtualWanTestSuite) TestRouteMaps() { + routeMapName := testutil.GenerateAlphaNumericID(testsuite.T(), "routemapna", 6) + var err error + // From step RouteMaps_CreateOrUpdate + fmt.Println("Call operation: RouteMaps_CreateOrUpdate") + routeMapsClient, err := armnetwork.NewRouteMapsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + routeMapsClientCreateOrUpdateResponsePoller, err := routeMapsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, routeMapName, armnetwork.RouteMap{ + Properties: &armnetwork.RouteMapProperties{ + AssociatedInboundConnections: []*string{ + to.Ptr("/subscriptions/" + testsuite.subscriptionId + "/resourceGroups/" + testsuite.resourceGroupName + "/providers/Microsoft.Network/expressRouteGateways/exrGateway1/expressRouteConnections/exrConn1")}, + AssociatedOutboundConnections: []*string{}, + Rules: []*armnetwork.RouteMapRule{ + { + Name: to.Ptr("rule1"), + Actions: []*armnetwork.Action{ + { + Type: to.Ptr(armnetwork.RouteMapActionTypeAdd), + Parameters: []*armnetwork.Parameter{ + { + AsPath: 
[]*string{ + to.Ptr("22334")}, + Community: []*string{}, + RoutePrefix: []*string{}, + }}, + }}, + MatchCriteria: []*armnetwork.Criterion{ + { + AsPath: []*string{}, + Community: []*string{}, + MatchCondition: to.Ptr(armnetwork.RouteMapMatchConditionContains), + RoutePrefix: []*string{ + to.Ptr("10.0.0.0/8")}, + }}, + NextStepIfMatched: to.Ptr(armnetwork.NextStepContinue), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, routeMapsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step RouteMaps_List + fmt.Println("Call operation: RouteMaps_List") + routeMapsClientNewListPager := routeMapsClient.NewListPager(testsuite.resourceGroupName, testsuite.virtualHubName, nil) + for routeMapsClientNewListPager.More() { + _, err := routeMapsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step RouteMaps_Get + fmt.Println("Call operation: RouteMaps_Get") + _, err = routeMapsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, routeMapName, nil) + testsuite.Require().NoError(err) + + // From step RouteMaps_Delete + fmt.Println("Call operation: RouteMaps_Delete") + routeMapsClientDeleteResponsePoller, err := routeMapsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, routeMapName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, routeMapsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/virtualHubs/{virtualHubName}/hubRouteTables/{routeTableName} +func (testsuite *VirtualWanTestSuite) TestHubRouteTables() { + var err error + // From step HubRouteTables_CreateOrUpdate + fmt.Println("Call operation: HubRouteTables_CreateOrUpdate") + hubRouteTablesClient, err := armnetwork.NewHubRouteTablesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + 
hubRouteTablesClientCreateOrUpdateResponsePoller, err := hubRouteTablesClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, "hubRouteTable1", armnetwork.HubRouteTable{ + Properties: &armnetwork.HubRouteTableProperties{ + Labels: []*string{ + to.Ptr("label1"), + to.Ptr("label2"), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, hubRouteTablesClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step HubRouteTables_List + fmt.Println("Call operation: HubRouteTables_List") + hubRouteTablesClientNewListPager := hubRouteTablesClient.NewListPager(testsuite.resourceGroupName, testsuite.virtualHubName, nil) + for hubRouteTablesClientNewListPager.More() { + _, err := hubRouteTablesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step HubRouteTables_Get + fmt.Println("Call operation: HubRouteTables_Get") + _, err = hubRouteTablesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, "hubRouteTable1", nil) + testsuite.Require().NoError(err) + + // From step HubRouteTables_Delete + fmt.Println("Call operation: HubRouteTables_Delete") + hubRouteTablesClientDeleteResponsePoller, err := hubRouteTablesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, "hubRouteTable1", nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, hubRouteTablesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.Network/virtualHubs/{virtualHubName}/routingIntent/{routingIntentName} +func (testsuite *VirtualWanTestSuite) TestRoutingIntent() { + var err error + // From step RoutingIntent_CreateOrUpdate + fmt.Println("Call operation: RoutingIntent_CreateOrUpdate") + routingIntentClient, err := armnetwork.NewRoutingIntentClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + 
testsuite.Require().NoError(err) + routingIntentClientCreateOrUpdateResponsePoller, err := routingIntentClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, "Intent1", armnetwork.RoutingIntent{ + Properties: &armnetwork.RoutingIntentProperties{}, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, routingIntentClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step RoutingIntent_List + fmt.Println("Call operation: RoutingIntent_List") + routingIntentClientNewListPager := routingIntentClient.NewListPager(testsuite.resourceGroupName, testsuite.virtualHubName, nil) + for routingIntentClientNewListPager.More() { + _, err := routingIntentClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step RoutingIntent_Get + fmt.Println("Call operation: RoutingIntent_Get") + _, err = routingIntentClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, "Intent1", nil) + testsuite.Require().NoError(err) + + // From step RoutingIntent_Delete + fmt.Println("Call operation: RoutingIntent_Delete") + routingIntentClientDeleteResponsePoller, err := routingIntentClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.virtualHubName, "Intent1", nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, routingIntentClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *VirtualWanTestSuite) Cleanup() { + var err error + // From step VpnSites_Delete + fmt.Println("Call operation: VPNSites_Delete") + vPNSitesClient, err := armnetwork.NewVPNSitesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + vPNSitesClientDeleteResponsePoller, err := vPNSitesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.vpnSiteName, nil) + testsuite.Require().NoError(err) + _, err = 
testutil.PollForTest(testsuite.ctx, vPNSitesClientDeleteResponsePoller) + testsuite.Require().NoError(err) + + // From step VirtualWans_Delete + fmt.Println("Call operation: VirtualWans_Delete") + virtualWansClient, err := armnetwork.NewVirtualWansClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + virtualWansClientDeleteResponsePoller, err := virtualWansClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, "virtualWan1", nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, virtualWansClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/network/armnetwork/webapplicationfirewall_live_test.go b/sdk/resourcemanager/network/armnetwork/webapplicationfirewall_live_test.go new file mode 100644 index 000000000000..546757f835d7 --- /dev/null +++ b/sdk/resourcemanager/network/armnetwork/webapplicationfirewall_live_test.go @@ -0,0 +1,120 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package armnetwork_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2" + "github.com/stretchr/testify/suite" +) + +type WebapplicationfirewallTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + policyName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *WebapplicationfirewallTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/network/armnetwork/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.policyName = testutil.GenerateAlphaNumericID(testsuite.T(), "wafpolicyname", 6) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *WebapplicationfirewallTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestWebapplicationfirewallTestSuite(t *testing.T) { + suite.Run(t, 
new(WebapplicationfirewallTestSuite)) +} + +// Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName} +func (testsuite *WebapplicationfirewallTestSuite) TestWebApplicationFirewallPolicies() { + var err error + // From step WebApplicationFirewallPolicies_CreateOrUpdate + fmt.Println("Call operation: WebApplicationFirewallPolicies_CreateOrUpdate") + webApplicationFirewallPoliciesClient, err := armnetwork.NewWebApplicationFirewallPoliciesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = webApplicationFirewallPoliciesClient.CreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.policyName, armnetwork.WebApplicationFirewallPolicy{ + Location: to.Ptr(testsuite.location), + Properties: &armnetwork.WebApplicationFirewallPolicyPropertiesFormat{ + ManagedRules: &armnetwork.ManagedRulesDefinition{ + ManagedRuleSets: []*armnetwork.ManagedRuleSet{ + { + RuleGroupOverrides: []*armnetwork.ManagedRuleGroupOverride{ + { + RuleGroupName: to.Ptr("REQUEST-931-APPLICATION-ATTACK-RFI"), + Rules: []*armnetwork.ManagedRuleOverride{ + { + Action: to.Ptr(armnetwork.ActionTypeLog), + RuleID: to.Ptr("931120"), + State: to.Ptr(armnetwork.ManagedRuleEnabledStateEnabled), + }}, + }}, + RuleSetType: to.Ptr("OWASP"), + RuleSetVersion: to.Ptr("3.2"), + }}, + }, + }, + }, nil) + testsuite.Require().NoError(err) + + // From step WebApplicationFirewallPolicies_List + fmt.Println("Call operation: WebApplicationFirewallPolicies_List") + webApplicationFirewallPoliciesClientNewListPager := webApplicationFirewallPoliciesClient.NewListPager(testsuite.resourceGroupName, nil) + for webApplicationFirewallPoliciesClientNewListPager.More() { + _, err := webApplicationFirewallPoliciesClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step WebApplicationFirewallPolicies_ListAll + fmt.Println("Call operation: WebApplicationFirewallPolicies_ListAll") + 
webApplicationFirewallPoliciesClientNewListAllPager := webApplicationFirewallPoliciesClient.NewListAllPager(nil) + for webApplicationFirewallPoliciesClientNewListAllPager.More() { + _, err := webApplicationFirewallPoliciesClientNewListAllPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step WebApplicationFirewallPolicies_Get + fmt.Println("Call operation: WebApplicationFirewallPolicies_Get") + _, err = webApplicationFirewallPoliciesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.policyName, nil) + testsuite.Require().NoError(err) + + // From step WebApplicationFirewallPolicies_Delete + fmt.Println("Call operation: WebApplicationFirewallPolicies_Delete") + webApplicationFirewallPoliciesClientDeleteResponsePoller, err := webApplicationFirewallPoliciesClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.policyName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, webApplicationFirewallPoliciesClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} From 36f766d2feba70f64bed923c23b57602998e16c8 Mon Sep 17 00:00:00 2001 From: Peng Jiahui <46921893+Alancere@users.noreply.github.com> Date: Thu, 4 May 2023 16:20:54 +0800 Subject: [PATCH 18/50] add sdk/resourcemanager/cosmos/armcosmos live test (#20705) * add sdk/resourcemanager/cosmos/armcosmos live test * update assets.json * update assets.json * update assets.json * update assets.json --- .../cosmos/armcosmos/assets.json | 2 +- .../armcosmos/cassandraresources_live_test.go | 266 ++++++++++ .../armcosmos/databaseaccounts_live_test.go | 223 +++++++++ sdk/resourcemanager/cosmos/armcosmos/go.mod | 18 +- sdk/resourcemanager/cosmos/armcosmos/go.sum | 46 +- .../armcosmos/gremlinresources_live_test.go | 293 +++++++++++ .../armcosmos/mongodbresources_live_test.go | 256 ++++++++++ .../cosmos/armcosmos/operations_live_test.go | 88 ++++ .../privateendpointconnection_live_test.go | 293 +++++++++++ 
.../cosmos/armcosmos/services_live_test.go | 122 +++++ .../armcosmos/sqlresources_live_test.go | 465 ++++++++++++++++++ .../armcosmos/tableresources_live_test.go | 166 +++++++ 12 files changed, 2220 insertions(+), 18 deletions(-) create mode 100644 sdk/resourcemanager/cosmos/armcosmos/cassandraresources_live_test.go create mode 100644 sdk/resourcemanager/cosmos/armcosmos/databaseaccounts_live_test.go create mode 100644 sdk/resourcemanager/cosmos/armcosmos/gremlinresources_live_test.go create mode 100644 sdk/resourcemanager/cosmos/armcosmos/mongodbresources_live_test.go create mode 100644 sdk/resourcemanager/cosmos/armcosmos/operations_live_test.go create mode 100644 sdk/resourcemanager/cosmos/armcosmos/privateendpointconnection_live_test.go create mode 100644 sdk/resourcemanager/cosmos/armcosmos/services_live_test.go create mode 100644 sdk/resourcemanager/cosmos/armcosmos/sqlresources_live_test.go create mode 100644 sdk/resourcemanager/cosmos/armcosmos/tableresources_live_test.go diff --git a/sdk/resourcemanager/cosmos/armcosmos/assets.json b/sdk/resourcemanager/cosmos/armcosmos/assets.json index 583d0157f601..512680b33eee 100644 --- a/sdk/resourcemanager/cosmos/armcosmos/assets.json +++ b/sdk/resourcemanager/cosmos/armcosmos/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/resourcemanager/cosmos/armcosmos", - "Tag": "go/resourcemanager/cosmos/armcosmos_f300ab67c7" + "Tag": "go/resourcemanager/cosmos/armcosmos_b256b80d8d" } diff --git a/sdk/resourcemanager/cosmos/armcosmos/cassandraresources_live_test.go b/sdk/resourcemanager/cosmos/armcosmos/cassandraresources_live_test.go new file mode 100644 index 000000000000..5d02a42eadb4 --- /dev/null +++ b/sdk/resourcemanager/cosmos/armcosmos/cassandraresources_live_test.go @@ -0,0 +1,266 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package armcosmos_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type CassandraResourcesTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + accountName string + keyspaceName string + tableName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *CassandraResourcesTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/cosmos/armcosmos/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.accountName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "accountn", 14, true) + testsuite.keyspaceName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "keyspace", 14, false) + testsuite.tableName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "tablenam", 14, false) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func 
(testsuite *CassandraResourcesTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestCassandraResourcesTestSuite(t *testing.T) { + suite.Run(t, new(CassandraResourcesTestSuite)) +} + +func (testsuite *CassandraResourcesTestSuite) Prepare() { + var err error + // From step DatabaseAccounts_CreateOrUpdate + fmt.Println("Call operation: DatabaseAccounts_CreateOrUpdate") + databaseAccountsClient, err := armcosmos.NewDatabaseAccountsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + databaseAccountsClientCreateOrUpdateResponsePoller, err := databaseAccountsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Properties: &armcosmos.DatabaseAccountCreateUpdateProperties{ + Capabilities: []*armcosmos.Capability{ + { + Name: to.Ptr("EnableCassandra"), + }}, + CreateMode: to.Ptr(armcosmos.CreateModeDefault), + DatabaseAccountOfferType: to.Ptr("Standard"), + Locations: []*armcosmos.Location{ + { + FailoverPriority: to.Ptr[int32](0), + IsZoneRedundant: to.Ptr(false), + LocationName: to.Ptr(testsuite.location), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName} +func (testsuite *CassandraResourcesTestSuite) TestCassandraKeyspace() { + var err error + // From step CassandraResources_CreateUpdateCassandraKeyspace + fmt.Println("Call operation: CassandraResources_CreateUpdateCassandraKeyspace") + cassandraResourcesClient, err := 
armcosmos.NewCassandraResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + cassandraResourcesClientCreateUpdateCassandraKeyspaceResponsePoller, err := cassandraResourcesClient.BeginCreateUpdateCassandraKeyspace(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, armcosmos.CassandraKeyspaceCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.CassandraKeyspaceCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{ + Throughput: to.Ptr[int32](2000), + }, + Resource: &armcosmos.CassandraKeyspaceResource{ + ID: to.Ptr(testsuite.keyspaceName), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientCreateUpdateCassandraKeyspaceResponsePoller) + testsuite.Require().NoError(err) + + // From step CassandraResources_ListCassandraKeyspaces + fmt.Println("Call operation: CassandraResources_ListCassandraKeyspaces") + cassandraResourcesClientNewListCassandraKeyspacesPager := cassandraResourcesClient.NewListCassandraKeyspacesPager(testsuite.resourceGroupName, testsuite.accountName, nil) + for cassandraResourcesClientNewListCassandraKeyspacesPager.More() { + _, err := cassandraResourcesClientNewListCassandraKeyspacesPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step CassandraResources_GetCassandraKeyspaceThroughput + fmt.Println("Call operation: CassandraResources_GetCassandraKeyspaceThroughput") + _, err = cassandraResourcesClient.GetCassandraKeyspaceThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, nil) + testsuite.Require().NoError(err) + + // From step CassandraResources_GetCassandraKeyspace + fmt.Println("Call operation: CassandraResources_GetCassandraKeyspace") + _, err = cassandraResourcesClient.GetCassandraKeyspace(testsuite.ctx, 
testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, nil) + testsuite.Require().NoError(err) + + // From step CassandraResources_MigrateCassandraKeyspaceToAutoscale + fmt.Println("Call operation: CassandraResources_MigrateCassandraKeyspaceToAutoscale") + cassandraResourcesClientMigrateCassandraKeyspaceToAutoscaleResponsePoller, err := cassandraResourcesClient.BeginMigrateCassandraKeyspaceToAutoscale(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientMigrateCassandraKeyspaceToAutoscaleResponsePoller) + testsuite.Require().NoError(err) + + // From step CassandraResources_MigrateCassandraKeyspaceToManualThroughput + fmt.Println("Call operation: CassandraResources_MigrateCassandraKeyspaceToManualThroughput") + cassandraResourcesClientMigrateCassandraKeyspaceToManualThroughputResponsePoller, err := cassandraResourcesClient.BeginMigrateCassandraKeyspaceToManualThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientMigrateCassandraKeyspaceToManualThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step CassandraResources_UpdateCassandraKeyspaceThroughput + fmt.Println("Call operation: CassandraResources_UpdateCassandraKeyspaceThroughput") + cassandraResourcesClientUpdateCassandraKeyspaceThroughputResponsePoller, err := cassandraResourcesClient.BeginUpdateCassandraKeyspaceThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, armcosmos.ThroughputSettingsUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.ThroughputSettingsUpdateProperties{ + Resource: &armcosmos.ThroughputSettingsResource{ + Throughput: 
to.Ptr[int32](400), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientUpdateCassandraKeyspaceThroughputResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/cassandraKeyspaces/{keyspaceName}/tables/{tableName} +func (testsuite *CassandraResourcesTestSuite) TestCassandraTable() { + var err error + // From step CassandraResources_CreateUpdateCassandraTable + fmt.Println("Call operation: CassandraResources_CreateUpdateCassandraTable") + cassandraResourcesClient, err := armcosmos.NewCassandraResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + cassandraResourcesClientCreateUpdateCassandraTableResponsePoller, err := cassandraResourcesClient.BeginCreateUpdateCassandraTable(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, testsuite.tableName, armcosmos.CassandraTableCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.CassandraTableCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{ + Throughput: to.Ptr[int32](2000), + }, + Resource: &armcosmos.CassandraTableResource{ + Schema: &armcosmos.CassandraSchema{ + Columns: []*armcosmos.Column{ + { + Name: to.Ptr("columnA"), + Type: to.Ptr("Ascii"), + }}, + PartitionKeys: []*armcosmos.CassandraPartitionKey{ + { + Name: to.Ptr("columnA"), + }}, + }, + DefaultTTL: to.Ptr[int32](100), + ID: to.Ptr(testsuite.tableName), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientCreateUpdateCassandraTableResponsePoller) + testsuite.Require().NoError(err) + + // From step CassandraResources_GetCassandraTable + fmt.Println("Call operation: CassandraResources_GetCassandraTable") + _, err = cassandraResourcesClient.GetCassandraTable(testsuite.ctx, 
testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + + // From step CassandraResources_ListCassandraTables + fmt.Println("Call operation: CassandraResources_ListCassandraTables") + cassandraResourcesClientNewListCassandraTablesPager := cassandraResourcesClient.NewListCassandraTablesPager(testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, nil) + for cassandraResourcesClientNewListCassandraTablesPager.More() { + _, err := cassandraResourcesClientNewListCassandraTablesPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step CassandraResources_GetCassandraTableThroughput + fmt.Println("Call operation: CassandraResources_GetCassandraTableThroughput") + _, err = cassandraResourcesClient.GetCassandraTableThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + + // From step CassandraResources_MigrateCassandraTableToAutoscale + fmt.Println("Call operation: CassandraResources_MigrateCassandraTableToAutoscale") + cassandraResourcesClientMigrateCassandraTableToAutoscaleResponsePoller, err := cassandraResourcesClient.BeginMigrateCassandraTableToAutoscale(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientMigrateCassandraTableToAutoscaleResponsePoller) + testsuite.Require().NoError(err) + + // From step CassandraResources_MigrateCassandraTableToManualThroughput + fmt.Println("Call operation: CassandraResources_MigrateCassandraTableToManualThroughput") + cassandraResourcesClientMigrateCassandraTableToManualThroughputResponsePoller, err := cassandraResourcesClient.BeginMigrateCassandraTableToManualThroughput(testsuite.ctx, testsuite.resourceGroupName, 
testsuite.accountName, testsuite.keyspaceName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientMigrateCassandraTableToManualThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step CassandraResources_UpdateCassandraTableThroughput + fmt.Println("Call operation: CassandraResources_UpdateCassandraTableThroughput") + cassandraResourcesClientUpdateCassandraTableThroughputResponsePoller, err := cassandraResourcesClient.BeginUpdateCassandraTableThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, testsuite.tableName, armcosmos.ThroughputSettingsUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.ThroughputSettingsUpdateProperties{ + Resource: &armcosmos.ThroughputSettingsResource{ + Throughput: to.Ptr[int32](400), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientUpdateCassandraTableThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step CassandraResources_DeleteCassandraTable + fmt.Println("Call operation: CassandraResources_DeleteCassandraTable") + cassandraResourcesClientDeleteCassandraTableResponsePoller, err := cassandraResourcesClient.BeginDeleteCassandraTable(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientDeleteCassandraTableResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *CassandraResourcesTestSuite) Cleanup() { + var err error + // From step CassandraResources_DeleteCassandraKeyspace + fmt.Println("Call operation: CassandraResources_DeleteCassandraKeyspace") + cassandraResourcesClient, err := armcosmos.NewCassandraResourcesClient(testsuite.subscriptionId, 
testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + cassandraResourcesClientDeleteCassandraKeyspaceResponsePoller, err := cassandraResourcesClient.BeginDeleteCassandraKeyspace(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.keyspaceName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, cassandraResourcesClientDeleteCassandraKeyspaceResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/cosmos/armcosmos/databaseaccounts_live_test.go b/sdk/resourcemanager/cosmos/armcosmos/databaseaccounts_live_test.go new file mode 100644 index 000000000000..3a8064d476e4 --- /dev/null +++ b/sdk/resourcemanager/cosmos/armcosmos/databaseaccounts_live_test.go @@ -0,0 +1,223 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package armcosmos_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type DatabaseAccountsTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + accountName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *DatabaseAccountsTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/cosmos/armcosmos/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = 
testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.accountName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "accountn", 14, true) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *DatabaseAccountsTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestDatabaseAccountsTestSuite(t *testing.T) { + suite.Run(t, new(DatabaseAccountsTestSuite)) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName} +func (testsuite *DatabaseAccountsTestSuite) TestDatabaseAccounts() { + var err error + // From step DatabaseAccounts_CheckNameExists + fmt.Println("Call operation: DatabaseAccounts_CheckNameExists") + databaseAccountsClient, err := armcosmos.NewDatabaseAccountsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = databaseAccountsClient.CheckNameExists(testsuite.ctx, testsuite.accountName, nil) + testsuite.Require().NoError(err) + + // From step DatabaseAccounts_CreateOrUpdate + fmt.Println("Call operation: DatabaseAccounts_CreateOrUpdate") + databaseAccountsClientCreateOrUpdateResponsePoller, err := databaseAccountsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Properties: 
&armcosmos.DatabaseAccountCreateUpdateProperties{ + CreateMode: to.Ptr(armcosmos.CreateModeDefault), + DatabaseAccountOfferType: to.Ptr("Standard"), + Locations: []*armcosmos.Location{ + { + FailoverPriority: to.Ptr[int32](2), + LocationName: to.Ptr("southcentralus"), + }, + { + FailoverPriority: to.Ptr[int32](1), + LocationName: to.Ptr("eastus"), + }, + { + FailoverPriority: to.Ptr[int32](0), + LocationName: to.Ptr("westus"), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step DatabaseAccounts_List + fmt.Println("Call operation: DatabaseAccounts_List") + databaseAccountsClientNewListPager := databaseAccountsClient.NewListPager(nil) + for databaseAccountsClientNewListPager.More() { + _, err := databaseAccountsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DatabaseAccounts_ListUsages + fmt.Println("Call operation: DatabaseAccounts_ListUsages") + databaseAccountsClientNewListUsagesPager := databaseAccountsClient.NewListUsagesPager(testsuite.resourceGroupName, testsuite.accountName, &armcosmos.DatabaseAccountsClientListUsagesOptions{Filter: to.Ptr("")}) + for databaseAccountsClientNewListUsagesPager.More() { + _, err := databaseAccountsClientNewListUsagesPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DatabaseAccounts_GetReadOnlyKeys + fmt.Println("Call operation: DatabaseAccounts_GetReadOnlyKeys") + _, err = databaseAccountsClient.GetReadOnlyKeys(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, nil) + testsuite.Require().NoError(err) + + // From step DatabaseAccounts_ListByResourceGroup + fmt.Println("Call operation: DatabaseAccounts_ListByResourceGroup") + databaseAccountsClientNewListByResourceGroupPager := databaseAccountsClient.NewListByResourceGroupPager(testsuite.resourceGroupName, nil) 
+ for databaseAccountsClientNewListByResourceGroupPager.More() { + _, err := databaseAccountsClientNewListByResourceGroupPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DatabaseAccounts_ListMetricDefinitions + fmt.Println("Call operation: DatabaseAccounts_ListMetricDefinitions") + databaseAccountsClientNewListMetricDefinitionsPager := databaseAccountsClient.NewListMetricDefinitionsPager(testsuite.resourceGroupName, testsuite.accountName, nil) + for databaseAccountsClientNewListMetricDefinitionsPager.More() { + _, err := databaseAccountsClientNewListMetricDefinitionsPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DatabaseAccounts_Get + fmt.Println("Call operation: DatabaseAccounts_Get") + _, err = databaseAccountsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, nil) + testsuite.Require().NoError(err) + + // From step DatabaseAccounts_Update + fmt.Println("Call operation: DatabaseAccounts_Update") + databaseAccountsClientUpdateResponsePoller, err := databaseAccountsClient.BeginUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountUpdateParameters{ + Tags: map[string]*string{ + "dept": to.Ptr("finance"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step DatabaseAccounts_RegenerateKey + fmt.Println("Call operation: DatabaseAccounts_RegenerateKey") + databaseAccountsClientRegenerateKeyResponsePoller, err := databaseAccountsClient.BeginRegenerateKey(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountRegenerateKeyParameters{ + KeyKind: to.Ptr(armcosmos.KeyKindPrimary), + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientRegenerateKeyResponsePoller) + 
testsuite.Require().NoError(err) + + // From step DatabaseAccounts_ListReadOnlyKeys + fmt.Println("Call operation: DatabaseAccounts_ListReadOnlyKeys") + _, err = databaseAccountsClient.ListReadOnlyKeys(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, nil) + testsuite.Require().NoError(err) + + // From step DatabaseAccounts_ListConnectionStrings + fmt.Println("Call operation: DatabaseAccounts_ListConnectionStrings") + _, err = databaseAccountsClient.ListConnectionStrings(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, nil) + testsuite.Require().NoError(err) + + // From step DatabaseAccounts_ListKeys + fmt.Println("Call operation: DatabaseAccounts_ListKeys") + _, err = databaseAccountsClient.ListKeys(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, nil) + testsuite.Require().NoError(err) + + // From step DatabaseAccounts_FailoverPriorityChange + fmt.Println("Call operation: DatabaseAccounts_FailoverPriorityChange") + databaseAccountsClientFailoverPriorityChangeResponsePoller, err := databaseAccountsClient.BeginFailoverPriorityChange(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.FailoverPolicies{ + FailoverPolicies: []*armcosmos.FailoverPolicy{ + { + FailoverPriority: to.Ptr[int32](0), + LocationName: to.Ptr("eastus"), + }, + { + FailoverPriority: to.Ptr[int32](2), + LocationName: to.Ptr("southcentralus"), + }, + { + FailoverPriority: to.Ptr[int32](1), + LocationName: to.Ptr("westus"), + }}, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientFailoverPriorityChangeResponsePoller) + testsuite.Require().NoError(err) + + // From step RestorableDatabaseAccounts_ListByLocation + fmt.Println("Call operation: RestorableDatabaseAccounts_ListByLocation") + restorableDatabaseAccountsClient, err := armcosmos.NewRestorableDatabaseAccountsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + 
testsuite.Require().NoError(err) + restorableDatabaseAccountsClientNewListByLocationPager := restorableDatabaseAccountsClient.NewListByLocationPager(testsuite.location, nil) + for restorableDatabaseAccountsClientNewListByLocationPager.More() { + _, err := restorableDatabaseAccountsClientNewListByLocationPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step RestorableDatabaseAccounts_List + fmt.Println("Call operation: RestorableDatabaseAccounts_List") + restorableDatabaseAccountsClientNewListPager := restorableDatabaseAccountsClient.NewListPager(nil) + for restorableDatabaseAccountsClientNewListPager.More() { + _, err := restorableDatabaseAccountsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step DatabaseAccounts_Delete + fmt.Println("Call operation: DatabaseAccounts_Delete") + databaseAccountsClientDeleteResponsePoller, err := databaseAccountsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/cosmos/armcosmos/go.mod b/sdk/resourcemanager/cosmos/armcosmos/go.mod index 7e1ccc0fbbd8..635ba01cfcae 100644 --- a/sdk/resourcemanager/cosmos/armcosmos/go.mod +++ b/sdk/resourcemanager/cosmos/armcosmos/go.mod @@ -3,19 +3,27 @@ module github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2 go 1.18 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources 
v1.1.1 + github.com/stretchr/testify v1.8.2 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dnaeon/go-vcr v1.1.0 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/google/uuid v1.3.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect golang.org/x/crypto v0.6.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.8.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sdk/resourcemanager/cosmos/armcosmos/go.sum b/sdk/resourcemanager/cosmos/armcosmos/go.sum index 8ba445a8c4da..e625fa8fa1b9 100644 --- a/sdk/resourcemanager/cosmos/armcosmos/go.sum +++ b/sdk/resourcemanager/cosmos/armcosmos/go.sum @@ -1,31 +1,53 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0 h1:xGLAFFd9D3iLGxYiUGPdITSzsFmU1K8VtfuUHWAoN7M= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 
h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2 h1:mLY+pNLjCUeKhgnAJWAKhEUQM+RJQo2H1fuGSw1Ky1E= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal v1.1.2/go.mod h1:FbdwsQ2EzwvXxOPcMFYO8ogEc9uMMIj3YkmCdXdAFmk= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net 
v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/resourcemanager/cosmos/armcosmos/gremlinresources_live_test.go b/sdk/resourcemanager/cosmos/armcosmos/gremlinresources_live_test.go new file mode 100644 index 000000000000..aa932ebd6385 --- /dev/null +++ 
b/sdk/resourcemanager/cosmos/armcosmos/gremlinresources_live_test.go @@ -0,0 +1,293 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package armcosmos_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type GremlinResourcesTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + accountName string + databaseName string + graphName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *GremlinResourcesTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/cosmos/armcosmos/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.accountName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "accountn", 14, true) + testsuite.databaseName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "gremlindb", 15, false) + testsuite.graphName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "graphnam", 14, false) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := 
testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *GremlinResourcesTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestGremlinResourcesTestSuite(t *testing.T) { + suite.Run(t, new(GremlinResourcesTestSuite)) +} + +func (testsuite *GremlinResourcesTestSuite) Prepare() { + var err error + // From step DatabaseAccounts_CreateOrUpdate + fmt.Println("Call operation: DatabaseAccounts_CreateOrUpdate") + databaseAccountsClient, err := armcosmos.NewDatabaseAccountsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + databaseAccountsClientCreateOrUpdateResponsePoller, err := databaseAccountsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Properties: &armcosmos.DatabaseAccountCreateUpdateProperties{ + Capabilities: []*armcosmos.Capability{ + { + Name: to.Ptr("EnableGremlin"), + }}, + CreateMode: to.Ptr(armcosmos.CreateModeDefault), + DatabaseAccountOfferType: to.Ptr("Standard"), + Locations: []*armcosmos.Location{ + { + FailoverPriority: to.Ptr[int32](0), + IsZoneRedundant: to.Ptr(false), + LocationName: to.Ptr(testsuite.location), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step GremlinResources_CreateUpdateGremlinDatabase + fmt.Println("Call operation: GremlinResources_CreateUpdateGremlinDatabase") + 
gremlinResourcesClient, err := armcosmos.NewGremlinResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + gremlinResourcesClientCreateUpdateGremlinDatabaseResponsePoller, err := gremlinResourcesClient.BeginCreateUpdateGremlinDatabase(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, armcosmos.GremlinDatabaseCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.GremlinDatabaseCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{ + Throughput: to.Ptr[int32](2000), + }, + Resource: &armcosmos.GremlinDatabaseResource{ + ID: to.Ptr(testsuite.databaseName), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, gremlinResourcesClientCreateUpdateGremlinDatabaseResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName} +func (testsuite *GremlinResourcesTestSuite) TestGremlinDatabase() { + var err error + // From step GremlinResources_ListGremlinDatabases + fmt.Println("Call operation: GremlinResources_ListGremlinDatabases") + gremlinResourcesClient, err := armcosmos.NewGremlinResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + gremlinResourcesClientNewListGremlinDatabasesPager := gremlinResourcesClient.NewListGremlinDatabasesPager(testsuite.resourceGroupName, testsuite.accountName, nil) + for gremlinResourcesClientNewListGremlinDatabasesPager.More() { + _, err := gremlinResourcesClientNewListGremlinDatabasesPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step GremlinResources_GetGremlinDatabaseThroughput + fmt.Println("Call operation: GremlinResources_GetGremlinDatabaseThroughput") + _, err = gremlinResourcesClient.GetGremlinDatabaseThroughput(testsuite.ctx, 
testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + + // From step GremlinResources_GetGremlinDatabase + fmt.Println("Call operation: GremlinResources_GetGremlinDatabase") + _, err = gremlinResourcesClient.GetGremlinDatabase(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + + // From step GremlinResources_MigrateGremlinDatabaseToAutoscale + fmt.Println("Call operation: GremlinResources_MigrateGremlinDatabaseToAutoscale") + gremlinResourcesClientMigrateGremlinDatabaseToAutoscaleResponsePoller, err := gremlinResourcesClient.BeginMigrateGremlinDatabaseToAutoscale(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, gremlinResourcesClientMigrateGremlinDatabaseToAutoscaleResponsePoller) + testsuite.Require().NoError(err) + + // From step GremlinResources_MigrateGremlinDatabaseToManualThroughput + fmt.Println("Call operation: GremlinResources_MigrateGremlinDatabaseToManualThroughput") + gremlinResourcesClientMigrateGremlinDatabaseToManualThroughputResponsePoller, err := gremlinResourcesClient.BeginMigrateGremlinDatabaseToManualThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, gremlinResourcesClientMigrateGremlinDatabaseToManualThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step GremlinResources_UpdateGremlinDatabaseThroughput + fmt.Println("Call operation: GremlinResources_UpdateGremlinDatabaseThroughput") + gremlinResourcesClientUpdateGremlinDatabaseThroughputResponsePoller, err := gremlinResourcesClient.BeginUpdateGremlinDatabaseThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, 
armcosmos.ThroughputSettingsUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.ThroughputSettingsUpdateProperties{ + Resource: &armcosmos.ThroughputSettingsResource{ + Throughput: to.Ptr[int32](400), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, gremlinResourcesClientUpdateGremlinDatabaseThroughputResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/gremlinDatabases/{databaseName}/graphs/{graphName} +func (testsuite *GremlinResourcesTestSuite) TestGremlinGraph() { + var err error + // From step GremlinResources_CreateUpdateGremlinGraph + fmt.Println("Call operation: GremlinResources_CreateUpdateGremlinGraph") + gremlinResourcesClient, err := armcosmos.NewGremlinResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + gremlinResourcesClientCreateUpdateGremlinGraphResponsePoller, err := gremlinResourcesClient.BeginCreateUpdateGremlinGraph(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.graphName, armcosmos.GremlinGraphCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.GremlinGraphCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{ + Throughput: to.Ptr[int32](2000), + }, + Resource: &armcosmos.GremlinGraphResource{ + ConflictResolutionPolicy: &armcosmos.ConflictResolutionPolicy{ + ConflictResolutionPath: to.Ptr("/path"), + Mode: to.Ptr(armcosmos.ConflictResolutionModeLastWriterWins), + }, + DefaultTTL: to.Ptr[int32](100), + ID: to.Ptr(testsuite.graphName), + IndexingPolicy: &armcosmos.IndexingPolicy{ + Automatic: to.Ptr(true), + ExcludedPaths: []*armcosmos.ExcludedPath{}, + IncludedPaths: []*armcosmos.IncludedPath{ + { + Path: to.Ptr("/*"), + Indexes: []*armcosmos.Indexes{ + { + DataType: 
to.Ptr(armcosmos.DataTypeString), + Kind: to.Ptr(armcosmos.IndexKindRange), + Precision: to.Ptr[int32](-1), + }, + { + DataType: to.Ptr(armcosmos.DataTypeNumber), + Kind: to.Ptr(armcosmos.IndexKindRange), + Precision: to.Ptr[int32](-1), + }}, + }}, + IndexingMode: to.Ptr(armcosmos.IndexingModeConsistent), + }, + PartitionKey: &armcosmos.ContainerPartitionKey{ + Kind: to.Ptr(armcosmos.PartitionKindHash), + Paths: []*string{ + to.Ptr("/AccountNumber")}, + }, + UniqueKeyPolicy: &armcosmos.UniqueKeyPolicy{ + UniqueKeys: []*armcosmos.UniqueKey{ + { + Paths: []*string{ + to.Ptr("/testPath")}, + }}, + }, + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, gremlinResourcesClientCreateUpdateGremlinGraphResponsePoller) + testsuite.Require().NoError(err) + + // From step GremlinResources_GetGremlinGraph + fmt.Println("Call operation: GremlinResources_GetGremlinGraph") + _, err = gremlinResourcesClient.GetGremlinGraph(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.graphName, nil) + testsuite.Require().NoError(err) + + // From step GremlinResources_GetGremlinGraphThroughput + fmt.Println("Call operation: GremlinResources_GetGremlinGraphThroughput") + _, err = gremlinResourcesClient.GetGremlinGraphThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.graphName, nil) + testsuite.Require().NoError(err) + + // From step GremlinResources_ListGremlinGraphs + fmt.Println("Call operation: GremlinResources_ListGremlinGraphs") + gremlinResourcesClientNewListGremlinGraphsPager := gremlinResourcesClient.NewListGremlinGraphsPager(testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + for gremlinResourcesClientNewListGremlinGraphsPager.More() { + _, err := gremlinResourcesClientNewListGremlinGraphsPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step 
GremlinResources_MigrateGremlinGraphToAutoscale + fmt.Println("Call operation: GremlinResources_MigrateGremlinGraphToAutoscale") + gremlinResourcesClientMigrateGremlinGraphToAutoscaleResponsePoller, err := gremlinResourcesClient.BeginMigrateGremlinGraphToAutoscale(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.graphName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, gremlinResourcesClientMigrateGremlinGraphToAutoscaleResponsePoller) + testsuite.Require().NoError(err) + + // From step GremlinResources_MigrateGremlinGraphToManualThroughput + fmt.Println("Call operation: GremlinResources_MigrateGremlinGraphToManualThroughput") + gremlinResourcesClientMigrateGremlinGraphToManualThroughputResponsePoller, err := gremlinResourcesClient.BeginMigrateGremlinGraphToManualThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.graphName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, gremlinResourcesClientMigrateGremlinGraphToManualThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step GremlinResources_UpdateGremlinGraphThroughput + fmt.Println("Call operation: GremlinResources_UpdateGremlinGraphThroughput") + gremlinResourcesClientUpdateGremlinGraphThroughputResponsePoller, err := gremlinResourcesClient.BeginUpdateGremlinGraphThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.graphName, armcosmos.ThroughputSettingsUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.ThroughputSettingsUpdateProperties{ + Resource: &armcosmos.ThroughputSettingsResource{ + Throughput: to.Ptr[int32](400), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, 
gremlinResourcesClientUpdateGremlinGraphThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step GremlinResources_DeleteGremlinGraph + fmt.Println("Call operation: GremlinResources_DeleteGremlinGraph") + gremlinResourcesClientDeleteGremlinGraphResponsePoller, err := gremlinResourcesClient.BeginDeleteGremlinGraph(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.graphName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, gremlinResourcesClientDeleteGremlinGraphResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *GremlinResourcesTestSuite) Cleanup() { + var err error + // From step GremlinResources_DeleteGremlinDatabase + fmt.Println("Call operation: GremlinResources_DeleteGremlinDatabase") + gremlinResourcesClient, err := armcosmos.NewGremlinResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + gremlinResourcesClientDeleteGremlinDatabaseResponsePoller, err := gremlinResourcesClient.BeginDeleteGremlinDatabase(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, gremlinResourcesClientDeleteGremlinDatabaseResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/cosmos/armcosmos/mongodbresources_live_test.go b/sdk/resourcemanager/cosmos/armcosmos/mongodbresources_live_test.go new file mode 100644 index 000000000000..bf4f8bbbab7c --- /dev/null +++ b/sdk/resourcemanager/cosmos/armcosmos/mongodbresources_live_test.go @@ -0,0 +1,256 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package armcosmos_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type MongoDbResourcesTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + accountName string + collectionName string + databaseName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *MongoDbResourcesTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/cosmos/armcosmos/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.accountName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "accountn", 14, true) + testsuite.collectionName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "collecti", 14, false) + testsuite.databaseName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "mongodb", 14, false) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *MongoDbResourcesTestSuite) TearDownSuite() { + 
testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestMongoDbResourcesTestSuite(t *testing.T) { + suite.Run(t, new(MongoDbResourcesTestSuite)) +} + +func (testsuite *MongoDbResourcesTestSuite) Prepare() { + var err error + // From step DatabaseAccounts_CreateOrUpdate + fmt.Println("Call operation: DatabaseAccounts_CreateOrUpdate") + databaseAccountsClient, err := armcosmos.NewDatabaseAccountsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + databaseAccountsClientCreateOrUpdateResponsePoller, err := databaseAccountsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Kind: to.Ptr(armcosmos.DatabaseAccountKindMongoDB), + Properties: &armcosmos.DatabaseAccountCreateUpdateProperties{ + CreateMode: to.Ptr(armcosmos.CreateModeDefault), + DatabaseAccountOfferType: to.Ptr("Standard"), + Locations: []*armcosmos.Location{ + { + FailoverPriority: to.Ptr[int32](0), + IsZoneRedundant: to.Ptr(false), + LocationName: to.Ptr(testsuite.location), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step MongoDBResources_CreateUpdateMongoDBDatabase + fmt.Println("Call operation: MongoDBResources_CreateUpdateMongoDBDatabase") + mongoDBResourcesClient, err := armcosmos.NewMongoDBResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + mongoDBResourcesClientCreateUpdateMongoDBDatabaseResponsePoller, err := mongoDBResourcesClient.BeginCreateUpdateMongoDBDatabase(testsuite.ctx, 
testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, armcosmos.MongoDBDatabaseCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.MongoDBDatabaseCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{ + Throughput: to.Ptr[int32](2000), + }, + Resource: &armcosmos.MongoDBDatabaseResource{ + ID: to.Ptr(testsuite.databaseName), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, mongoDBResourcesClientCreateUpdateMongoDBDatabaseResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName} +func (testsuite *MongoDbResourcesTestSuite) TestMongoDbDatabase() { + var err error + // From step MongoDBResources_ListMongoDBDatabases + fmt.Println("Call operation: MongoDBResources_ListMongoDBDatabases") + mongoDBResourcesClient, err := armcosmos.NewMongoDBResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + mongoDBResourcesClientNewListMongoDBDatabasesPager := mongoDBResourcesClient.NewListMongoDBDatabasesPager(testsuite.resourceGroupName, testsuite.accountName, nil) + for mongoDBResourcesClientNewListMongoDBDatabasesPager.More() { + _, err := mongoDBResourcesClientNewListMongoDBDatabasesPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step MongoDBResources_GetMongoDBDatabase + fmt.Println("Call operation: MongoDBResources_GetMongoDBDatabase") + _, err = mongoDBResourcesClient.GetMongoDBDatabase(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + + // From step MongoDBResources_GetMongoDBDatabaseThroughput + fmt.Println("Call operation: MongoDBResources_GetMongoDBDatabaseThroughput") + _, err = mongoDBResourcesClient.GetMongoDBDatabaseThroughput(testsuite.ctx, 
testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + + // From step MongoDBResources_MigrateMongoDBDatabaseToAutoscale + fmt.Println("Call operation: MongoDBResources_MigrateMongoDBDatabaseToAutoscale") + mongoDBResourcesClientMigrateMongoDBDatabaseToAutoscaleResponsePoller, err := mongoDBResourcesClient.BeginMigrateMongoDBDatabaseToAutoscale(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, mongoDBResourcesClientMigrateMongoDBDatabaseToAutoscaleResponsePoller) + testsuite.Require().NoError(err) + + // From step MongoDBResources_MigrateMongoDBDatabaseToManualThroughput + fmt.Println("Call operation: MongoDBResources_MigrateMongoDBDatabaseToManualThroughput") + mongoDBResourcesClientMigrateMongoDBDatabaseToManualThroughputResponsePoller, err := mongoDBResourcesClient.BeginMigrateMongoDBDatabaseToManualThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, mongoDBResourcesClientMigrateMongoDBDatabaseToManualThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step MongoDBResources_UpdateMongoDBDatabaseThroughput + fmt.Println("Call operation: MongoDBResources_UpdateMongoDBDatabaseThroughput") + mongoDBResourcesClientUpdateMongoDBDatabaseThroughputResponsePoller, err := mongoDBResourcesClient.BeginUpdateMongoDBDatabaseThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, armcosmos.ThroughputSettingsUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.ThroughputSettingsUpdateProperties{ + Resource: &armcosmos.ThroughputSettingsResource{ + Throughput: to.Ptr[int32](400), + }, + }, + }, nil) + 
testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, mongoDBResourcesClientUpdateMongoDBDatabaseThroughputResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/mongodbDatabases/{databaseName}/collections/{collectionName} +func (testsuite *MongoDbResourcesTestSuite) TestMongoDbCollection() { + var err error + // From step MongoDBResources_CreateUpdateMongoDBCollection + fmt.Println("Call operation: MongoDBResources_CreateUpdateMongoDBCollection") + mongoDBResourcesClient, err := armcosmos.NewMongoDBResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + mongoDBResourcesClientCreateUpdateMongoDBCollectionResponsePoller, err := mongoDBResourcesClient.BeginCreateUpdateMongoDBCollection(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.collectionName, armcosmos.MongoDBCollectionCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.MongoDBCollectionCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{ + Throughput: to.Ptr[int32](2000), + }, + Resource: &armcosmos.MongoDBCollectionResource{ + ID: to.Ptr(testsuite.collectionName), + ShardKey: map[string]*string{ + "testKey": to.Ptr("Hash"), + }, + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, mongoDBResourcesClientCreateUpdateMongoDBCollectionResponsePoller) + testsuite.Require().NoError(err) + + // From step MongoDBResources_ListMongoDBCollections + fmt.Println("Call operation: MongoDBResources_ListMongoDBCollections") + mongoDBResourcesClientNewListMongoDBCollectionsPager := mongoDBResourcesClient.NewListMongoDBCollectionsPager(testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + for mongoDBResourcesClientNewListMongoDBCollectionsPager.More() { + _, err := 
mongoDBResourcesClientNewListMongoDBCollectionsPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step MongoDBResources_GetMongoDBCollection + fmt.Println("Call operation: MongoDBResources_GetMongoDBCollection") + _, err = mongoDBResourcesClient.GetMongoDBCollection(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.collectionName, nil) + testsuite.Require().NoError(err) + + // From step MongoDBResources_GetMongoDBCollectionThroughput + fmt.Println("Call operation: MongoDBResources_GetMongoDBCollectionThroughput") + _, err = mongoDBResourcesClient.GetMongoDBCollectionThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.collectionName, nil) + testsuite.Require().NoError(err) + + // From step MongoDBResources_MigrateMongoDBCollectionToAutoscale + fmt.Println("Call operation: MongoDBResources_MigrateMongoDBCollectionToAutoscale") + mongoDBResourcesClientMigrateMongoDBCollectionToAutoscaleResponsePoller, err := mongoDBResourcesClient.BeginMigrateMongoDBCollectionToAutoscale(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.collectionName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, mongoDBResourcesClientMigrateMongoDBCollectionToAutoscaleResponsePoller) + testsuite.Require().NoError(err) + + // From step MongoDBResources_MigrateMongoDBCollectionToManualThroughput + fmt.Println("Call operation: MongoDBResources_MigrateMongoDBCollectionToManualThroughput") + mongoDBResourcesClientMigrateMongoDBCollectionToManualThroughputResponsePoller, err := mongoDBResourcesClient.BeginMigrateMongoDBCollectionToManualThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.collectionName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, 
mongoDBResourcesClientMigrateMongoDBCollectionToManualThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step MongoDBResources_UpdateMongoDBCollectionThroughput + fmt.Println("Call operation: MongoDBResources_UpdateMongoDBCollectionThroughput") + mongoDBResourcesClientUpdateMongoDBCollectionThroughputResponsePoller, err := mongoDBResourcesClient.BeginUpdateMongoDBCollectionThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.collectionName, armcosmos.ThroughputSettingsUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.ThroughputSettingsUpdateProperties{ + Resource: &armcosmos.ThroughputSettingsResource{ + Throughput: to.Ptr[int32](400), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, mongoDBResourcesClientUpdateMongoDBCollectionThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step MongoDBResources_DeleteMongoDBCollection + fmt.Println("Call operation: MongoDBResources_DeleteMongoDBCollection") + mongoDBResourcesClientDeleteMongoDBCollectionResponsePoller, err := mongoDBResourcesClient.BeginDeleteMongoDBCollection(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.collectionName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, mongoDBResourcesClientDeleteMongoDBCollectionResponsePoller) + testsuite.Require().NoError(err) +} + +func (testsuite *MongoDbResourcesTestSuite) Cleanup() { + var err error + // From step MongoDBResources_DeleteMongoDBDatabase + fmt.Println("Call operation: MongoDBResources_DeleteMongoDBDatabase") + mongoDBResourcesClient, err := armcosmos.NewMongoDBResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + mongoDBResourcesClientDeleteMongoDBDatabaseResponsePoller, err := 
mongoDBResourcesClient.BeginDeleteMongoDBDatabase(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, mongoDBResourcesClientDeleteMongoDBDatabaseResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/cosmos/armcosmos/operations_live_test.go b/sdk/resourcemanager/cosmos/armcosmos/operations_live_test.go new file mode 100644 index 000000000000..adb91b40c504 --- /dev/null +++ b/sdk/resourcemanager/cosmos/armcosmos/operations_live_test.go @@ -0,0 +1,88 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package armcosmos_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type OperationsTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *OperationsTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/cosmos/armcosmos/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := 
testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name +} + +func (testsuite *OperationsTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestOperationsTestSuite(t *testing.T) { + suite.Run(t, new(OperationsTestSuite)) +} + +// Microsoft.DocumentDB/operations +func (testsuite *OperationsTestSuite) TestOperations() { + var err error + // From step Operations_List + fmt.Println("Call operation: Operations_List") + operationsClient, err := armcosmos.NewOperationsClient(testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + operationsClientNewListPager := operationsClient.NewListPager(nil) + for operationsClientNewListPager.More() { + _, err := operationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} + +// Microsoft.DocumentDB/locations +func (testsuite *OperationsTestSuite) TestLocations() { + var err error + // From step Locations_List + fmt.Println("Call operation: Locations_List") + locationsClient, err := armcosmos.NewLocationsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + locationsClientNewListPager := locationsClient.NewListPager(nil) + for locationsClientNewListPager.More() { + _, err := locationsClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step Locations_Get + fmt.Println("Call operation: Locations_Get") + _, err = locationsClient.Get(testsuite.ctx, testsuite.location, nil) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/cosmos/armcosmos/privateendpointconnection_live_test.go 
b/sdk/resourcemanager/cosmos/armcosmos/privateendpointconnection_live_test.go new file mode 100644 index 000000000000..8d3c4a7819b7 --- /dev/null +++ b/sdk/resourcemanager/cosmos/armcosmos/privateendpointconnection_live_test.go @@ -0,0 +1,293 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package armcosmos_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/stretchr/testify/suite" +) + +type PrivateEndpointConnectionTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + accountName string + cosmosAccountId string + privateEndpointConnectionName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *PrivateEndpointConnectionTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/cosmos/armcosmos/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.accountName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "accountn", 14, true) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", 
"00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *PrivateEndpointConnectionTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestPrivateEndpointConnectionTestSuite(t *testing.T) { + suite.Run(t, new(PrivateEndpointConnectionTestSuite)) +} + +func (testsuite *PrivateEndpointConnectionTestSuite) Prepare() { + var err error + // From step DatabaseAccounts_CreateOrUpdate + fmt.Println("Call operation: DatabaseAccounts_CreateOrUpdate") + databaseAccountsClient, err := armcosmos.NewDatabaseAccountsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + databaseAccountsClientCreateOrUpdateResponsePoller, err := databaseAccountsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Properties: &armcosmos.DatabaseAccountCreateUpdateProperties{ + CreateMode: to.Ptr(armcosmos.CreateModeDefault), + DatabaseAccountOfferType: to.Ptr("Standard"), + Locations: []*armcosmos.Location{ + { + FailoverPriority: to.Ptr[int32](0), + IsZoneRedundant: to.Ptr(false), + LocationName: to.Ptr(testsuite.location), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + var databaseAccountsClientCreateOrUpdateResponse *armcosmos.DatabaseAccountsClientCreateOrUpdateResponse + databaseAccountsClientCreateOrUpdateResponse, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientCreateOrUpdateResponsePoller) + 
testsuite.Require().NoError(err) + testsuite.cosmosAccountId = *databaseAccountsClientCreateOrUpdateResponse.ID + + // From step Create_PrivateEndpoint + template := map[string]any{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "outputs": map[string]any{ + "privateEndpointConnectionName": map[string]any{ + "type": "string", + "value": "[parameters('privateEndpointName')]", + }, + }, + "parameters": map[string]any{ + "cosmosAccountId": map[string]any{ + "type": "string", + "defaultValue": testsuite.cosmosAccountId, + }, + "location": map[string]any{ + "type": "string", + "defaultValue": testsuite.location, + }, + "networkInterfaceName": map[string]any{ + "type": "string", + "defaultValue": "epcosmos-nic", + }, + "privateEndpointName": map[string]any{ + "type": "string", + "defaultValue": "epcosmos", + }, + "virtualNetworksName": map[string]any{ + "type": "string", + "defaultValue": "epcosmos-vnet", + }, + }, + "resources": []any{ + map[string]any{ + "name": "[parameters('virtualNetworksName')]", + "type": "Microsoft.Network/virtualNetworks", + "apiVersion": "2020-11-01", + "location": "[parameters('location')]", + "properties": map[string]any{ + "addressSpace": map[string]any{ + "addressPrefixes": []any{ + "10.0.0.0/16", + }, + }, + "enableDdosProtection": false, + "subnets": []any{ + map[string]any{ + "name": "default", + "properties": map[string]any{ + "addressPrefix": "10.0.0.0/24", + "delegations": []any{}, + "privateEndpointNetworkPolicies": "Disabled", + "privateLinkServiceNetworkPolicies": "Enabled", + }, + }, + }, + "virtualNetworkPeerings": []any{}, + }, + }, + map[string]any{ + "name": "[parameters('networkInterfaceName')]", + "type": "Microsoft.Network/networkInterfaces", + "apiVersion": "2020-11-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + "location": 
"[parameters('location')]", + "properties": map[string]any{ + "dnsSettings": map[string]any{ + "dnsServers": []any{}, + }, + "enableIPForwarding": false, + "ipConfigurations": []any{ + map[string]any{ + "name": "privateEndpointIpConfig", + "properties": map[string]any{ + "primary": true, + "privateIPAddress": "10.0.0.4", + "privateIPAddressVersion": "IPv4", + "privateIPAllocationMethod": "Dynamic", + "subnet": map[string]any{ + "id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + }, + }, + }, + }, + }, + map[string]any{ + "name": "[parameters('privateEndpointName')]", + "type": "Microsoft.Network/privateEndpoints", + "apiVersion": "2020-11-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + "location": "[parameters('location')]", + "properties": map[string]any{ + "customDnsConfigs": []any{}, + "manualPrivateLinkServiceConnections": []any{}, + "privateLinkServiceConnections": []any{ + map[string]any{ + "name": "[parameters('privateEndpointName')]", + "properties": map[string]any{ + "groupIds": []any{ + "Sql", + }, + "privateLinkServiceConnectionState": map[string]any{ + "description": "Auto-Approved", + "actionsRequired": "None", + "status": "Approved", + }, + "privateLinkServiceId": "[parameters('cosmosAccountId')]", + }, + }, + }, + "subnet": map[string]any{ + "id": "[resourceId('Microsoft.Network/virtualNetworks/subnets', parameters('virtualNetworksName'), 'default')]", + }, + }, + }, + map[string]any{ + "name": "[concat(parameters('virtualNetworksName'), '/default')]", + "type": "Microsoft.Network/virtualNetworks/subnets", + "apiVersion": "2020-11-01", + "dependsOn": []any{ + "[resourceId('Microsoft.Network/virtualNetworks', parameters('virtualNetworksName'))]", + }, + "properties": map[string]any{ + "addressPrefix": "10.0.0.0/24", + "delegations": []any{}, + "privateEndpointNetworkPolicies": "Disabled", + 
"privateLinkServiceNetworkPolicies": "Enabled", + }, + }, + }, + "variables": map[string]any{}, + } + deployment := armresources.Deployment{ + Properties: &armresources.DeploymentProperties{ + Template: template, + Mode: to.Ptr(armresources.DeploymentModeIncremental), + }, + } + deploymentExtend, err := testutil.CreateDeployment(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName, "Create_PrivateEndpoint", &deployment) + testsuite.Require().NoError(err) + testsuite.privateEndpointConnectionName = deploymentExtend.Properties.Outputs.(map[string]interface{})["privateEndpointConnectionName"].(map[string]interface{})["value"].(string) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/privateEndpointConnections/{privateEndpointConnectionName} +func (testsuite *PrivateEndpointConnectionTestSuite) TestPrivateEndpointConnections() { + var err error + // From step PrivateEndpointConnections_CreateOrUpdate + fmt.Println("Call operation: PrivateEndpointConnections_CreateOrUpdate") + privateEndpointConnectionsClient, err := armcosmos.NewPrivateEndpointConnectionsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + privateEndpointConnectionsClientCreateOrUpdateResponsePoller, err := privateEndpointConnectionsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.privateEndpointConnectionName, armcosmos.PrivateEndpointConnection{ + Properties: &armcosmos.PrivateEndpointConnectionProperties{ + GroupID: to.Ptr("Sql"), + PrivateLinkServiceConnectionState: &armcosmos.PrivateLinkServiceConnectionStateProperty{ + Description: to.Ptr("Approved by johndoe@contoso.com"), + Status: to.Ptr("Approved"), + }, + ProvisioningState: to.Ptr("Succeeded"), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, privateEndpointConnectionsClientCreateOrUpdateResponsePoller) + 
testsuite.Require().NoError(err) + + // From step PrivateEndpointConnections_ListByDatabaseAccount + fmt.Println("Call operation: PrivateEndpointConnections_ListByDatabaseAccount") + privateEndpointConnectionsClientNewListByDatabaseAccountPager := privateEndpointConnectionsClient.NewListByDatabaseAccountPager(testsuite.resourceGroupName, testsuite.accountName, nil) + for privateEndpointConnectionsClientNewListByDatabaseAccountPager.More() { + _, err := privateEndpointConnectionsClientNewListByDatabaseAccountPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step PrivateEndpointConnections_Get + fmt.Println("Call operation: PrivateEndpointConnections_Get") + _, err = privateEndpointConnectionsClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.privateEndpointConnectionName, nil) + testsuite.Require().NoError(err) + + // From step PrivateLinkResources_ListByDatabaseAccount + fmt.Println("Call operation: PrivateLinkResources_ListByDatabaseAccount") + privateLinkResourcesClient, err := armcosmos.NewPrivateLinkResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + privateLinkResourcesClientNewListByDatabaseAccountPager := privateLinkResourcesClient.NewListByDatabaseAccountPager(testsuite.resourceGroupName, testsuite.accountName, nil) + for privateLinkResourcesClientNewListByDatabaseAccountPager.More() { + _, err := privateLinkResourcesClientNewListByDatabaseAccountPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step PrivateLinkResources_Get + fmt.Println("Call operation: PrivateLinkResources_Get") + _, err = privateLinkResourcesClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, "sql", nil) + testsuite.Require().NoError(err) + + // From step PrivateEndpointConnections_Delete + fmt.Println("Call operation: PrivateEndpointConnections_Delete") + 
privateEndpointConnectionsClientDeleteResponsePoller, err := privateEndpointConnectionsClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.privateEndpointConnectionName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, privateEndpointConnectionsClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/cosmos/armcosmos/services_live_test.go b/sdk/resourcemanager/cosmos/armcosmos/services_live_test.go new file mode 100644 index 000000000000..0e5922b8ce43 --- /dev/null +++ b/sdk/resourcemanager/cosmos/armcosmos/services_live_test.go @@ -0,0 +1,122 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package armcosmos_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type ServicesTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + accountName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *ServicesTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/cosmos/armcosmos/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.accountName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "accountn", 
14, true) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *ServicesTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestServicesTestSuite(t *testing.T) { + suite.Run(t, new(ServicesTestSuite)) +} + +func (testsuite *ServicesTestSuite) Prepare() { + var err error + // From step DatabaseAccounts_CreateOrUpdate + fmt.Println("Call operation: DatabaseAccounts_CreateOrUpdate") + databaseAccountsClient, err := armcosmos.NewDatabaseAccountsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + databaseAccountsClientCreateOrUpdateResponsePoller, err := databaseAccountsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Properties: &armcosmos.DatabaseAccountCreateUpdateProperties{ + CreateMode: to.Ptr(armcosmos.CreateModeDefault), + DatabaseAccountOfferType: to.Ptr("Standard"), + Locations: []*armcosmos.Location{ + { + FailoverPriority: to.Ptr[int32](0), + IsZoneRedundant: to.Ptr(false), + LocationName: to.Ptr(testsuite.location), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientCreateOrUpdateResponsePoller) + 
testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/services/{serviceName} +func (testsuite *ServicesTestSuite) TestService() { + var err error + // From step Service_Create + fmt.Println("Call operation: Service_Create") + serviceClient, err := armcosmos.NewServiceClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + serviceClientCreateResponsePoller, err := serviceClient.BeginCreate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, "SqlDedicatedGateway", armcosmos.ServiceResourceCreateUpdateParameters{ + Properties: &armcosmos.ServiceResourceCreateUpdateProperties{ + InstanceCount: to.Ptr[int32](1), + InstanceSize: to.Ptr(armcosmos.ServiceSizeCosmosD4S), + ServiceType: to.Ptr(armcosmos.ServiceTypeSQLDedicatedGateway), + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serviceClientCreateResponsePoller) + testsuite.Require().NoError(err) + + // From step Service_List + fmt.Println("Call operation: Service_List") + serviceClientNewListPager := serviceClient.NewListPager(testsuite.resourceGroupName, testsuite.accountName, nil) + for serviceClientNewListPager.More() { + _, err := serviceClientNewListPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step Service_Get + fmt.Println("Call operation: Service_Get") + _, err = serviceClient.Get(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, "SqlDedicatedGateway", nil) + testsuite.Require().NoError(err) + + // From step Service_Delete + fmt.Println("Call operation: Service_Delete") + serviceClientDeleteResponsePoller, err := serviceClient.BeginDelete(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, "SqlDedicatedGateway", nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, serviceClientDeleteResponsePoller) + testsuite.Require().NoError(err) +} diff --git 
a/sdk/resourcemanager/cosmos/armcosmos/sqlresources_live_test.go b/sdk/resourcemanager/cosmos/armcosmos/sqlresources_live_test.go new file mode 100644 index 000000000000..0de386832bb0 --- /dev/null +++ b/sdk/resourcemanager/cosmos/armcosmos/sqlresources_live_test.go @@ -0,0 +1,465 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package armcosmos_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type SqlResourcesTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + accountName string + containerName string + databaseName string + triggerName string + storedProcedureName string + userDefinedFunctionName string + clientEncryptionKeyName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *SqlResourcesTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/cosmos/armcosmos/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.accountName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "accountn", 14, true) + testsuite.containerName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "sqlcontaine", 17, false) + testsuite.databaseName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "sqldb", 11, false) + 
testsuite.triggerName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "triggern", 14, false) + testsuite.storedProcedureName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "storedpr", 14, true) + testsuite.userDefinedFunctionName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "userdefi", 14, false) + testsuite.clientEncryptionKeyName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "clienten", 14, false) + testsuite.location = testutil.GetEnv("LOCATION", "westus") + testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *SqlResourcesTestSuite) TearDownSuite() { + testsuite.Cleanup() + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestSqlResourcesTestSuite(t *testing.T) { + suite.Run(t, new(SqlResourcesTestSuite)) +} + +func (testsuite *SqlResourcesTestSuite) Prepare() { + var err error + // From step DatabaseAccounts_CreateOrUpdate + fmt.Println("Call operation: DatabaseAccounts_CreateOrUpdate") + databaseAccountsClient, err := armcosmos.NewDatabaseAccountsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + databaseAccountsClientCreateOrUpdateResponsePoller, err := databaseAccountsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Properties: 
&armcosmos.DatabaseAccountCreateUpdateProperties{ + CreateMode: to.Ptr(armcosmos.CreateModeDefault), + DatabaseAccountOfferType: to.Ptr("Standard"), + Locations: []*armcosmos.Location{ + { + FailoverPriority: to.Ptr[int32](0), + IsZoneRedundant: to.Ptr(false), + LocationName: to.Ptr(testsuite.location), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, databaseAccountsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_CreateUpdateSqlDatabase + fmt.Println("Call operation: SQLResources_CreateUpdateSQLDatabase") + sQLResourcesClient, err := armcosmos.NewSQLResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + sQLResourcesClientCreateUpdateSQLDatabaseResponsePoller, err := sQLResourcesClient.BeginCreateUpdateSQLDatabase(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, armcosmos.SQLDatabaseCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.SQLDatabaseCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{ + Throughput: to.Ptr[int32](2000), + }, + Resource: &armcosmos.SQLDatabaseResource{ + ID: to.Ptr(testsuite.databaseName), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientCreateUpdateSQLDatabaseResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_CreateUpdateSqlContainer + fmt.Println("Call operation: SQLResources_CreateUpdateSQLContainer") + sQLResourcesClientCreateUpdateSQLContainerResponsePoller, err := sQLResourcesClient.BeginCreateUpdateSQLContainer(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, armcosmos.SQLContainerCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + 
Properties: &armcosmos.SQLContainerCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{ + Throughput: to.Ptr[int32](2000), + }, + Resource: &armcosmos.SQLContainerResource{ + ConflictResolutionPolicy: &armcosmos.ConflictResolutionPolicy{ + ConflictResolutionPath: to.Ptr("/path"), + Mode: to.Ptr(armcosmos.ConflictResolutionModeLastWriterWins), + }, + ID: to.Ptr(testsuite.containerName), + IndexingPolicy: &armcosmos.IndexingPolicy{ + Automatic: to.Ptr(true), + ExcludedPaths: []*armcosmos.ExcludedPath{}, + IncludedPaths: []*armcosmos.IncludedPath{ + { + Path: to.Ptr("/*"), + Indexes: []*armcosmos.Indexes{ + { + DataType: to.Ptr(armcosmos.DataTypeString), + Kind: to.Ptr(armcosmos.IndexKindRange), + Precision: to.Ptr[int32](-1), + }, + { + DataType: to.Ptr(armcosmos.DataTypeNumber), + Kind: to.Ptr(armcosmos.IndexKindRange), + Precision: to.Ptr[int32](-1), + }}, + }}, + IndexingMode: to.Ptr(armcosmos.IndexingModeConsistent), + }, + PartitionKey: &armcosmos.ContainerPartitionKey{ + Kind: to.Ptr(armcosmos.PartitionKindHash), + Paths: []*string{ + to.Ptr("/AccountNumber")}, + }, + UniqueKeyPolicy: &armcosmos.UniqueKeyPolicy{ + UniqueKeys: []*armcosmos.UniqueKey{ + { + Paths: []*string{ + to.Ptr("/testPath")}, + }}, + }, + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientCreateUpdateSQLContainerResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName} +func (testsuite *SqlResourcesTestSuite) TestSqlDatabase() { + var err error + // From step SqlResources_ListSqlDatabases + fmt.Println("Call operation: SQLResources_ListSQLDatabases") + sQLResourcesClient, err := armcosmos.NewSQLResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + sQLResourcesClientNewListSQLDatabasesPager := sQLResourcesClient.NewListSQLDatabasesPager(testsuite.resourceGroupName, 
testsuite.accountName, nil) + for sQLResourcesClientNewListSQLDatabasesPager.More() { + _, err := sQLResourcesClientNewListSQLDatabasesPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SqlResources_GetSqlDatabase + fmt.Println("Call operation: SQLResources_GetSQLDatabase") + _, err = sQLResourcesClient.GetSQLDatabase(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + + // From step SqlResources_GetSqlDatabaseThroughput + fmt.Println("Call operation: SQLResources_GetSQLDatabaseThroughput") + _, err = sQLResourcesClient.GetSQLDatabaseThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + + // From step SqlResources_UpdateSqlDatabaseThroughput + fmt.Println("Call operation: SQLResources_UpdateSQLDatabaseThroughput") + sQLResourcesClientUpdateSQLDatabaseThroughputResponsePoller, err := sQLResourcesClient.BeginUpdateSQLDatabaseThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, armcosmos.ThroughputSettingsUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.ThroughputSettingsUpdateProperties{ + Resource: &armcosmos.ThroughputSettingsResource{ + Throughput: to.Ptr[int32](400), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientUpdateSQLDatabaseThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_MigrateSqlDatabaseToAutoscale + fmt.Println("Call operation: SQLResources_MigrateSQLDatabaseToAutoscale") + sQLResourcesClientMigrateSQLDatabaseToAutoscaleResponsePoller, err := sQLResourcesClient.BeginMigrateSQLDatabaseToAutoscale(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + 
testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientMigrateSQLDatabaseToAutoscaleResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_MigrateSqlDatabaseToManualThroughput + fmt.Println("Call operation: SQLResources_MigrateSQLDatabaseToManualThroughput") + sQLResourcesClientMigrateSQLDatabaseToManualThroughputResponsePoller, err := sQLResourcesClient.BeginMigrateSQLDatabaseToManualThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientMigrateSQLDatabaseToManualThroughputResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName} +func (testsuite *SqlResourcesTestSuite) TestSqlContainer() { + var err error + // From step SqlResources_GetSqlContainer + fmt.Println("Call operation: SQLResources_GetSQLContainer") + sQLResourcesClient, err := armcosmos.NewSQLResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + _, err = sQLResourcesClient.GetSQLContainer(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, nil) + testsuite.Require().NoError(err) + + // From step SqlResources_ListSqlContainers + fmt.Println("Call operation: SQLResources_ListSQLContainers") + sQLResourcesClientNewListSQLContainersPager := sQLResourcesClient.NewListSQLContainersPager(testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + for sQLResourcesClientNewListSQLContainersPager.More() { + _, err := sQLResourcesClientNewListSQLContainersPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SqlResources_GetSqlContainerThroughput + fmt.Println("Call operation: 
SQLResources_GetSQLContainerThroughput") + _, err = sQLResourcesClient.GetSQLContainerThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, nil) + testsuite.Require().NoError(err) + + // From step SqlResources_UpdateSqlContainerThroughput + fmt.Println("Call operation: SQLResources_UpdateSQLContainerThroughput") + sQLResourcesClientUpdateSQLContainerThroughputResponsePoller, err := sQLResourcesClient.BeginUpdateSQLContainerThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, armcosmos.ThroughputSettingsUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.ThroughputSettingsUpdateProperties{ + Resource: &armcosmos.ThroughputSettingsResource{ + Throughput: to.Ptr[int32](400), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientUpdateSQLContainerThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_MigrateSqlContainerToAutoscale + fmt.Println("Call operation: SQLResources_MigrateSQLContainerToAutoscale") + sQLResourcesClientMigrateSQLContainerToAutoscaleResponsePoller, err := sQLResourcesClient.BeginMigrateSQLContainerToAutoscale(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientMigrateSQLContainerToAutoscaleResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_MigrateSqlContainerToManualThroughput + fmt.Println("Call operation: SQLResources_MigrateSQLContainerToManualThroughput") + sQLResourcesClientMigrateSQLContainerToManualThroughputResponsePoller, err := sQLResourcesClient.BeginMigrateSQLContainerToManualThroughput(testsuite.ctx, testsuite.resourceGroupName, 
testsuite.accountName, testsuite.databaseName, testsuite.containerName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientMigrateSQLContainerToManualThroughputResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/triggers/{triggerName} +func (testsuite *SqlResourcesTestSuite) TestSqlTrigger() { + var err error + // From step SqlResources_CreateUpdateSqlTrigger + fmt.Println("Call operation: SQLResources_CreateUpdateSQLTrigger") + sQLResourcesClient, err := armcosmos.NewSQLResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + sQLResourcesClientCreateUpdateSQLTriggerResponsePoller, err := sQLResourcesClient.BeginCreateUpdateSQLTrigger(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, testsuite.triggerName, armcosmos.SQLTriggerCreateUpdateParameters{ + Properties: &armcosmos.SQLTriggerCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{}, + Resource: &armcosmos.SQLTriggerResource{ + Body: to.Ptr("body"), + ID: to.Ptr(testsuite.triggerName), + TriggerOperation: to.Ptr(armcosmos.TriggerOperationAll), + TriggerType: to.Ptr(armcosmos.TriggerTypePre), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientCreateUpdateSQLTriggerResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_GetSqlTrigger + fmt.Println("Call operation: SQLResources_GetSQLTrigger") + _, err = sQLResourcesClient.GetSQLTrigger(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, testsuite.triggerName, nil) + testsuite.Require().NoError(err) + + // From step SqlResources_ListSqlTriggers + fmt.Println("Call operation: SQLResources_ListSQLTriggers") + 
sQLResourcesClientNewListSQLTriggersPager := sQLResourcesClient.NewListSQLTriggersPager(testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, nil) + for sQLResourcesClientNewListSQLTriggersPager.More() { + _, err := sQLResourcesClientNewListSQLTriggersPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SqlResources_DeleteSqlTrigger + fmt.Println("Call operation: SQLResources_DeleteSQLTrigger") + sQLResourcesClientDeleteSQLTriggerResponsePoller, err := sQLResourcesClient.BeginDeleteSQLTrigger(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, testsuite.triggerName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientDeleteSQLTriggerResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/storedProcedures/{storedProcedureName} +func (testsuite *SqlResourcesTestSuite) TestSqlStoredProcedure() { + var err error + // From step SqlResources_CreateUpdateSqlStoredProcedure + fmt.Println("Call operation: SQLResources_CreateUpdateSQLStoredProcedure") + sQLResourcesClient, err := armcosmos.NewSQLResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + sQLResourcesClientCreateUpdateSQLStoredProcedureResponsePoller, err := sQLResourcesClient.BeginCreateUpdateSQLStoredProcedure(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, testsuite.storedProcedureName, armcosmos.SQLStoredProcedureCreateUpdateParameters{ + Properties: &armcosmos.SQLStoredProcedureCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{}, + Resource: &armcosmos.SQLStoredProcedureResource{ + Body: to.Ptr("body"), + ID: to.Ptr(testsuite.storedProcedureName), + }, + }, + 
}, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientCreateUpdateSQLStoredProcedureResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_GetSqlStoredProcedure + fmt.Println("Call operation: SQLResources_GetSQLStoredProcedure") + _, err = sQLResourcesClient.GetSQLStoredProcedure(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, testsuite.storedProcedureName, nil) + testsuite.Require().NoError(err) + + // From step SqlResources_ListSqlStoredProcedures + fmt.Println("Call operation: SQLResources_ListSQLStoredProcedures") + sQLResourcesClientNewListSQLStoredProceduresPager := sQLResourcesClient.NewListSQLStoredProceduresPager(testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, nil) + for sQLResourcesClientNewListSQLStoredProceduresPager.More() { + _, err := sQLResourcesClientNewListSQLStoredProceduresPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SqlResources_DeleteSqlStoredProcedure + fmt.Println("Call operation: SQLResources_DeleteSQLStoredProcedure") + sQLResourcesClientDeleteSQLStoredProcedureResponsePoller, err := sQLResourcesClient.BeginDeleteSQLStoredProcedure(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, testsuite.storedProcedureName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientDeleteSQLStoredProcedureResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/containers/{containerName}/userDefinedFunctions/{userDefinedFunctionName} +func (testsuite *SqlResourcesTestSuite) TestSqlUserDefinedFunction() { + var err error + // From step SqlResources_CreateUpdateSqlUserDefinedFunction + fmt.Println("Call operation: 
SQLResources_CreateUpdateSQLUserDefinedFunction") + sQLResourcesClient, err := armcosmos.NewSQLResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + sQLResourcesClientCreateUpdateSQLUserDefinedFunctionResponsePoller, err := sQLResourcesClient.BeginCreateUpdateSQLUserDefinedFunction(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, testsuite.userDefinedFunctionName, armcosmos.SQLUserDefinedFunctionCreateUpdateParameters{ + Properties: &armcosmos.SQLUserDefinedFunctionCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{}, + Resource: &armcosmos.SQLUserDefinedFunctionResource{ + Body: to.Ptr("body"), + ID: to.Ptr(testsuite.userDefinedFunctionName), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientCreateUpdateSQLUserDefinedFunctionResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_GetSqlUserDefinedFunction + fmt.Println("Call operation: SQLResources_GetSQLUserDefinedFunction") + _, err = sQLResourcesClient.GetSQLUserDefinedFunction(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, testsuite.userDefinedFunctionName, nil) + testsuite.Require().NoError(err) + + // From step SqlResources_ListSqlUserDefinedFunctions + fmt.Println("Call operation: SQLResources_ListSQLUserDefinedFunctions") + sQLResourcesClientNewListSQLUserDefinedFunctionsPager := sQLResourcesClient.NewListSQLUserDefinedFunctionsPager(testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, nil) + for sQLResourcesClientNewListSQLUserDefinedFunctionsPager.More() { + _, err := sQLResourcesClientNewListSQLUserDefinedFunctionsPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step SqlResources_DeleteSqlUserDefinedFunction + 
fmt.Println("Call operation: SQLResources_DeleteSQLUserDefinedFunction") + sQLResourcesClientDeleteSQLUserDefinedFunctionResponsePoller, err := sQLResourcesClient.BeginDeleteSQLUserDefinedFunction(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, testsuite.userDefinedFunctionName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientDeleteSQLUserDefinedFunctionResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/sqlDatabases/{databaseName}/clientEncryptionKeys/{clientEncryptionKeyName} +func (testsuite *SqlResourcesTestSuite) TestSqlClientEncryptionKey() { + var err error + // From step SqlResources_CreateUpdateClientEncryptionKey + fmt.Println("Call operation: SQLResources_CreateUpdateClientEncryptionKey") + sQLResourcesClient, err := armcosmos.NewSQLResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + sQLResourcesClientCreateUpdateClientEncryptionKeyResponsePoller, err := sQLResourcesClient.BeginCreateUpdateClientEncryptionKey(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.clientEncryptionKeyName, armcosmos.ClientEncryptionKeyCreateUpdateParameters{ + Properties: &armcosmos.ClientEncryptionKeyCreateUpdateProperties{ + Resource: &armcosmos.ClientEncryptionKeyResource{ + EncryptionAlgorithm: to.Ptr("AEAD_AES_256_CBC_HMAC_SHA256"), + ID: to.Ptr(testsuite.clientEncryptionKeyName), + KeyWrapMetadata: &armcosmos.KeyWrapMetadata{ + Name: to.Ptr("customerManagedKey"), + Type: to.Ptr("AzureKeyVault"), + Algorithm: to.Ptr("RSA-OAEP"), + Value: to.Ptr("AzureKeyVault Key URL"), + }, + WrappedDataEncryptionKey: []byte("U3dhZ2dlciByb2Nrcw=="), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, 
sQLResourcesClientCreateUpdateClientEncryptionKeyResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_GetClientEncryptionKey + fmt.Println("Call operation: SQLResources_GetClientEncryptionKey") + _, err = sQLResourcesClient.GetClientEncryptionKey(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.clientEncryptionKeyName, nil) + testsuite.Require().NoError(err) + + // From step SqlResources_ListClientEncryptionKeys + fmt.Println("Call operation: SQLResources_ListClientEncryptionKeys") + sQLResourcesClientNewListClientEncryptionKeysPager := sQLResourcesClient.NewListClientEncryptionKeysPager(testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, nil) + for sQLResourcesClientNewListClientEncryptionKeysPager.More() { + _, err := sQLResourcesClientNewListClientEncryptionKeysPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } +} + +func (testsuite *SqlResourcesTestSuite) Cleanup() { + var err error + // From step SqlResources_DeleteSqlContainer + fmt.Println("Call operation: SQLResources_DeleteSQLContainer") + sQLResourcesClient, err := armcosmos.NewSQLResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + sQLResourcesClientDeleteSQLContainerResponsePoller, err := sQLResourcesClient.BeginDeleteSQLContainer(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, testsuite.containerName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientDeleteSQLContainerResponsePoller) + testsuite.Require().NoError(err) + + // From step SqlResources_DeleteSqlDatabase + fmt.Println("Call operation: SQLResources_DeleteSQLDatabase") + sQLResourcesClientDeleteSQLDatabaseResponsePoller, err := sQLResourcesClient.BeginDeleteSQLDatabase(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.databaseName, 
nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, sQLResourcesClientDeleteSQLDatabaseResponsePoller) + testsuite.Require().NoError(err) +} diff --git a/sdk/resourcemanager/cosmos/armcosmos/tableresources_live_test.go b/sdk/resourcemanager/cosmos/armcosmos/tableresources_live_test.go new file mode 100644 index 000000000000..3f72a33f3c5f --- /dev/null +++ b/sdk/resourcemanager/cosmos/armcosmos/tableresources_live_test.go @@ -0,0 +1,166 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package armcosmos_test + +import ( + "context" + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/cosmos/armcosmos/v2" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/testutil" + "github.com/stretchr/testify/suite" +) + +type TableResourcesTestSuite struct { + suite.Suite + + ctx context.Context + cred azcore.TokenCredential + options *arm.ClientOptions + accountName string + tableName string + location string + resourceGroupName string + subscriptionId string +} + +func (testsuite *TableResourcesTestSuite) SetupSuite() { + testutil.StartRecording(testsuite.T(), "sdk/resourcemanager/cosmos/armcosmos/testdata") + + testsuite.ctx = context.Background() + testsuite.cred, testsuite.options = testutil.GetCredAndClientOptions(testsuite.T()) + testsuite.accountName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "accountn", 14, true) + testsuite.tableName, _ = recording.GenerateAlphaNumericID(testsuite.T(), "tablenam", 14, false) + testsuite.location = testutil.GetEnv("LOCATION", "westus") 
+ testsuite.resourceGroupName = testutil.GetEnv("RESOURCE_GROUP_NAME", "scenarioTestTempGroup") + testsuite.subscriptionId = testutil.GetEnv("AZURE_SUBSCRIPTION_ID", "00000000-0000-0000-0000-000000000000") + resourceGroup, _, err := testutil.CreateResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.location) + testsuite.Require().NoError(err) + testsuite.resourceGroupName = *resourceGroup.Name + testsuite.Prepare() +} + +func (testsuite *TableResourcesTestSuite) TearDownSuite() { + _, err := testutil.DeleteResourceGroup(testsuite.ctx, testsuite.subscriptionId, testsuite.cred, testsuite.options, testsuite.resourceGroupName) + testsuite.Require().NoError(err) + testutil.StopRecording(testsuite.T()) +} + +func TestTableResourcesTestSuite(t *testing.T) { + suite.Run(t, new(TableResourcesTestSuite)) +} + +func (testsuite *TableResourcesTestSuite) Prepare() { + var err error + // From step DatabaseAccounts_CreateOrUpdate + fmt.Println("Call operation: DatabaseAccounts_CreateOrUpdate") + databaseAccountsClient, err := armcosmos.NewDatabaseAccountsClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + databaseAccountsClientCreateOrUpdateResponsePoller, err := databaseAccountsClient.BeginCreateOrUpdate(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, armcosmos.DatabaseAccountCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Kind: to.Ptr(armcosmos.DatabaseAccountKindGlobalDocumentDB), + Properties: &armcosmos.DatabaseAccountCreateUpdateProperties{ + Capabilities: []*armcosmos.Capability{ + { + Name: to.Ptr("EnableTable"), + }}, + CreateMode: to.Ptr(armcosmos.CreateModeDefault), + DatabaseAccountOfferType: to.Ptr("Standard"), + Locations: []*armcosmos.Location{ + { + FailoverPriority: to.Ptr[int32](0), + IsZoneRedundant: to.Ptr(false), + LocationName: to.Ptr(testsuite.location), + }}, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = 
testutil.PollForTest(testsuite.ctx, databaseAccountsClientCreateOrUpdateResponsePoller) + testsuite.Require().NoError(err) +} + +// Microsoft.DocumentDB/databaseAccounts/{accountName}/tables/{tableName} +func (testsuite *TableResourcesTestSuite) TestTableResources() { + var err error + // From step TableResources_CreateUpdateTable + fmt.Println("Call operation: TableResources_CreateUpdateTable") + tableResourcesClient, err := armcosmos.NewTableResourcesClient(testsuite.subscriptionId, testsuite.cred, testsuite.options) + testsuite.Require().NoError(err) + tableResourcesClientCreateUpdateTableResponsePoller, err := tableResourcesClient.BeginCreateUpdateTable(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.tableName, armcosmos.TableCreateUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.TableCreateUpdateProperties{ + Options: &armcosmos.CreateUpdateOptions{}, + Resource: &armcosmos.TableResource{ + ID: to.Ptr(testsuite.tableName), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, tableResourcesClientCreateUpdateTableResponsePoller) + testsuite.Require().NoError(err) + + // From step TableResources_ListTables + fmt.Println("Call operation: TableResources_ListTables") + tableResourcesClientNewListTablesPager := tableResourcesClient.NewListTablesPager(testsuite.resourceGroupName, testsuite.accountName, nil) + for tableResourcesClientNewListTablesPager.More() { + _, err := tableResourcesClientNewListTablesPager.NextPage(testsuite.ctx) + testsuite.Require().NoError(err) + break + } + + // From step TableResources_GetTableThroughput + fmt.Println("Call operation: TableResources_GetTableThroughput") + _, err = tableResourcesClient.GetTableThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + + // From step TableResources_GetTable + 
fmt.Println("Call operation: TableResources_GetTable") + _, err = tableResourcesClient.GetTable(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + + // From step TableResources_MigrateTableToAutoscale + fmt.Println("Call operation: TableResources_MigrateTableToAutoscale") + tableResourcesClientMigrateTableToAutoscaleResponsePoller, err := tableResourcesClient.BeginMigrateTableToAutoscale(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, tableResourcesClientMigrateTableToAutoscaleResponsePoller) + testsuite.Require().NoError(err) + + // From step TableResources_MigrateTableToManualThroughput + fmt.Println("Call operation: TableResources_MigrateTableToManualThroughput") + tableResourcesClientMigrateTableToManualThroughputResponsePoller, err := tableResourcesClient.BeginMigrateTableToManualThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, tableResourcesClientMigrateTableToManualThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step TableResources_UpdateTableThroughput + fmt.Println("Call operation: TableResources_UpdateTableThroughput") + tableResourcesClientUpdateTableThroughputResponsePoller, err := tableResourcesClient.BeginUpdateTableThroughput(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.tableName, armcosmos.ThroughputSettingsUpdateParameters{ + Location: to.Ptr(testsuite.location), + Tags: map[string]*string{}, + Properties: &armcosmos.ThroughputSettingsUpdateProperties{ + Resource: &armcosmos.ThroughputSettingsResource{ + Throughput: to.Ptr[int32](400), + }, + }, + }, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, 
tableResourcesClientUpdateTableThroughputResponsePoller) + testsuite.Require().NoError(err) + + // From step TableResources_DeleteTable + fmt.Println("Call operation: TableResources_DeleteTable") + tableResourcesClientDeleteTableResponsePoller, err := tableResourcesClient.BeginDeleteTable(testsuite.ctx, testsuite.resourceGroupName, testsuite.accountName, testsuite.tableName, nil) + testsuite.Require().NoError(err) + _, err = testutil.PollForTest(testsuite.ctx, tableResourcesClientDeleteTableResponsePoller) + testsuite.Require().NoError(err) +} From 9c9d62a1d612cf1f9e6902ddd3c538abd90b955c Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Thu, 4 May 2023 09:50:16 -0700 Subject: [PATCH 19/50] Increment package version after release of azcore (#20740) --- sdk/azcore/CHANGELOG.md | 10 ++++++++++ sdk/azcore/internal/shared/constants.go | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/sdk/azcore/CHANGELOG.md b/sdk/azcore/CHANGELOG.md index 7ecc8f2a9679..df18a5205d4e 100644 --- a/sdk/azcore/CHANGELOG.md +++ b/sdk/azcore/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.6.1 (Unreleased) + +### Features Added + +### Breaking Changes + +### Bugs Fixed + +### Other Changes + ## 1.6.0 (2023-05-04) ### Features Added diff --git a/sdk/azcore/internal/shared/constants.go b/sdk/azcore/internal/shared/constants.go index 681167bcba57..269a831ed178 100644 --- a/sdk/azcore/internal/shared/constants.go +++ b/sdk/azcore/internal/shared/constants.go @@ -32,5 +32,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. 
- Version = "v1.6.0" + Version = "v1.6.1" ) From 8bc34503b06adbb59fedf8a4bf884d818a15c35d Mon Sep 17 00:00:00 2001 From: Richard Park <51494936+richardpark-msft@users.noreply.github.com> Date: Thu, 4 May 2023 10:37:50 -0700 Subject: [PATCH 20/50] [azeventhubs] Improperly resetting etag in the checkpoint store (#20737) We shouldn't be resetting the etag to nil - it's what we use to enforce a "single winner" when doing ownership claims. The bug here was two-fold: I had bad logic in my previous claim ownership, which I fixed in a previous PR, but we need to reflect that same constraint properly in our in-memory checkpoint store for these tests. --- .../inmemory_checkpoint_store_test.go | 13 ++++++++---- sdk/messaging/azeventhubs/internal/errors.go | 1 + .../azeventhubs/processor_load_balancer.go | 20 ++++++++++++++++++- 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/sdk/messaging/azeventhubs/inmemory_checkpoint_store_test.go b/sdk/messaging/azeventhubs/inmemory_checkpoint_store_test.go index be8978f9c292..ea3aa5677ee6 100644 --- a/sdk/messaging/azeventhubs/inmemory_checkpoint_store_test.go +++ b/sdk/messaging/azeventhubs/inmemory_checkpoint_store_test.go @@ -206,13 +206,18 @@ func (cps *testCheckpointStore) ClaimOwnership(ctx context.Context, partitionOwn current, exists := cps.ownerships[key] - if exists && po.ETag != nil && *current.ETag != *po.ETag { - // can't own it, didn't have the expected etag - return nil, nil + if exists { + if po.ETag == nil { + panic("Ownership blob exists, we should have claimed it using an etag") + } + + if *po.ETag != *current.ETag { + // can't own it, didn't have the expected etag + return nil, nil + } } newOwnership := po - uuid, err := uuid.New() if err != nil { diff --git a/sdk/messaging/azeventhubs/internal/errors.go b/sdk/messaging/azeventhubs/internal/errors.go index 2f9fb1a90b56..c9e011725dfc 100644 --- a/sdk/messaging/azeventhubs/internal/errors.go +++ b/sdk/messaging/azeventhubs/internal/errors.go @@ -148,6 
+148,7 @@ var amqpConditionsToRecoveryKind = map[amqp.ErrCond]RecoveryKind{ amqp.ErrCondNotAllowed: RecoveryKindFatal, // "amqp:not-allowed" amqp.ErrCond("com.microsoft:entity-disabled"): RecoveryKindFatal, // entity is disabled in the portal amqp.ErrCond("com.microsoft:session-cannot-be-locked"): RecoveryKindFatal, + amqp.ErrCond("com.microsoft:argument-out-of-range"): RecoveryKindFatal, // asked for a partition ID that doesn't exist errorConditionLockLost: RecoveryKindFatal, } diff --git a/sdk/messaging/azeventhubs/processor_load_balancer.go b/sdk/messaging/azeventhubs/processor_load_balancer.go index 419e227ee163..99f396cfd652 100644 --- a/sdk/messaging/azeventhubs/processor_load_balancer.go +++ b/sdk/messaging/azeventhubs/processor_load_balancer.go @@ -9,6 +9,8 @@ import ( "math" "math/rand" "time" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" ) type processorLoadBalancer struct { @@ -73,6 +75,7 @@ func (lb *processorLoadBalancer) LoadBalance(ctx context.Context, partitionIDs [ // - I have too many. We expect to have some stolen from us, but we'll maintain // ownership for now. claimMorePartitions = false + log.Writef(EventConsumer, "Owns %d/%d, no more needed", len(lbinfo.current), lbinfo.maxAllowed) } else if lbinfo.extraPartitionPossible && len(lbinfo.current) == lbinfo.maxAllowed-1 { // In the 'extraPartitionPossible' scenario, some consumers will have an extra partition // since things don't divide up evenly. We're one under the max, which means we _might_ @@ -81,6 +84,10 @@ func (lb *processorLoadBalancer) LoadBalance(ctx context.Context, partitionIDs [ // We will attempt to grab _one_ more but only if there are free partitions available // or if one of the consumers has more than the max allowed. 
claimMorePartitions = len(lbinfo.unownedOrExpired) > 0 || len(lbinfo.aboveMax) > 0 + log.Writef(EventConsumer, "Unowned/expired: %d, above max: %d, need to claim: %t", + len(lbinfo.unownedOrExpired), + len(lbinfo.aboveMax), + claimMorePartitions) } ownerships := lbinfo.current @@ -88,8 +95,10 @@ func (lb *processorLoadBalancer) LoadBalance(ctx context.Context, partitionIDs [ if claimMorePartitions { switch lb.strategy { case ProcessorStrategyGreedy: + log.Writef(EventConsumer, "Using greedy strategy to claim partitions") ownerships = lb.greedyLoadBalancer(ctx, lbinfo) case ProcessorStrategyBalanced: + log.Writef(EventConsumer, "Using balanced strategy to claim partitions") o := lb.balancedLoadBalancer(ctx, lbinfo) if o != nil { @@ -106,6 +115,8 @@ func (lb *processorLoadBalancer) LoadBalance(ctx context.Context, partitionIDs [ // getAvailablePartitions finds all partitions that are either completely unowned _or_ // their ownership is stale. func (lb *processorLoadBalancer) getAvailablePartitions(ctx context.Context, partitionIDs []string) (loadBalancerInfo, error) { + log.Writef(EventConsumer, "[%s] Listing ownership for %s/%s/%s", lb.details.ClientID, lb.details.FullyQualifiedNamespace, lb.details.EventHubName, lb.details.ConsumerGroup) + ownerships, err := lb.checkpointStore.ListOwnership(ctx, lb.details.FullyQualifiedNamespace, lb.details.EventHubName, lb.details.ConsumerGroup, nil) if err != nil { @@ -132,6 +143,9 @@ func (lb *processorLoadBalancer) getAvailablePartitions(ctx context.Context, par groupedByOwner[o.OwnerID] = append(groupedByOwner[o.OwnerID], o) } + numExpired := len(unownedOrExpired) + log.Writef(EventConsumer, "Expired: %d", numExpired) + // add in all the unowned partitions for _, partID := range partitionIDs { if alreadyAdded[partID] { @@ -149,6 +163,8 @@ func (lb *processorLoadBalancer) getAvailablePartitions(ctx context.Context, par }) } + log.Writef(EventConsumer, "Unowned: %d", len(unownedOrExpired)-numExpired) + maxAllowed := 
len(partitionIDs) / len(groupedByOwner) hasRemainder := len(partitionIDs)%len(groupedByOwner) > 0 @@ -188,6 +204,8 @@ func (lb *processorLoadBalancer) greedyLoadBalancer(ctx context.Context, lbinfo ours = append(ours, randomOwnerships...) if len(ours) < lbinfo.maxAllowed { + log.Writef(EventConsumer, "Not enough expired or unowned partitions, will need to steal from other processors") + // if that's not enough then we'll randomly steal from any owners that had partitions // above the maximum. randomOwnerships := getRandomOwnerships(lb.rnd, lbinfo.aboveMax, lbinfo.maxAllowed-len(ours)) @@ -197,6 +215,7 @@ func (lb *processorLoadBalancer) greedyLoadBalancer(ctx context.Context, lbinfo for i := 0; i < len(ours); i++ { ours[i] = lb.resetOwnership(ours[i]) } + return ours } @@ -225,7 +244,6 @@ func (lb *processorLoadBalancer) balancedLoadBalancer(ctx context.Context, lbinf } func (lb *processorLoadBalancer) resetOwnership(o Ownership) Ownership { - o.ETag = nil o.OwnerID = lb.details.ClientID return o } From e1a6152d76310e552a346d39d5672368527a3a4b Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Thu, 4 May 2023 14:58:28 -0700 Subject: [PATCH 21/50] Eng workflows sync and branch cleanup additions (#20743) Co-authored-by: James Suplizio --- .../templates/steps/eng-common-workflow-enforcer.yml | 12 +++++++++++- eng/common/scripts/Delete-RemoteBranches.ps1 | 11 ++++++----- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/eng/common/pipelines/templates/steps/eng-common-workflow-enforcer.yml b/eng/common/pipelines/templates/steps/eng-common-workflow-enforcer.yml index ff4927b8bb5f..af8b009b5825 100644 --- a/eng/common/pipelines/templates/steps/eng-common-workflow-enforcer.yml +++ b/eng/common/pipelines/templates/steps/eng-common-workflow-enforcer.yml @@ -19,5 +19,15 @@ steps: exit 1 } } - displayName: Prevent changes to eng/common outside of azure-sdk-tools repo + if 
((!"$(System.PullRequest.SourceBranch)".StartsWith("sync-.github/workflows")) -and "$(System.PullRequest.TargetBranch)" -match "^(refs/heads/)?$(DefaultBranch)$") + { + $filesInCommonDir = & "eng/common/scripts/get-changedfiles.ps1" -DiffPath '.github/workflows/*' + if (($LASTEXITCODE -eq 0) -and ($filesInCommonDir.Count -gt 0)) + { + Write-Host "##vso[task.LogIssue type=error;]Changes to files under '.github/workflows' directory should not be made in this Repo`n${filesInCommonDir}" + Write-Host "##vso[task.LogIssue type=error;]Please follow workflow at https://github.com/Azure/azure-sdk-tools/blob/main/doc/workflows/engsys_workflows.md" + exit 1 + } + } + displayName: Prevent changes to eng/common and .github/workflows outside of azure-sdk-tools repo condition: and(succeeded(), ne(variables['Skip.EngCommonWorkflowEnforcer'], 'true'), not(endsWith(variables['Build.Repository.Name'], '-pr'))) \ No newline at end of file diff --git a/eng/common/scripts/Delete-RemoteBranches.ps1 b/eng/common/scripts/Delete-RemoteBranches.ps1 index 2d1c3c303316..49cc85cbe909 100644 --- a/eng/common/scripts/Delete-RemoteBranches.ps1 +++ b/eng/common/scripts/Delete-RemoteBranches.ps1 @@ -10,6 +10,7 @@ param( $CentralRepoId, # We start from the sync PRs, use the branch name to get the PR number of central repo. E.g. sync-eng/common-()-(). Have group name on PR number. # For sync-eng/common work, we use regex as "^sync-eng/common.*-(?\d+).*$". + # For sync-.github/workflows work, we use regex as "^sync-.github/workflows.*-(?\d+).*$". $BranchRegex, # Date format: e.g. Tuesday, April 12, 2022 1:36:02 PM. Allow to use other date format. [AllowNull()] @@ -69,7 +70,7 @@ foreach ($res in $responses) LogError "No PR number found in the branch name. Please check the branch name [ $branchName ]. Skipping..." 
continue } - + try { $centralPR = Get-GitHubPullRequest -RepoId $CentralRepoId -PullRequestNumber $pullRequestNumber -AuthToken $AuthToken LogDebug "Found central PR pull request: $($centralPR.html_url)" @@ -78,7 +79,7 @@ foreach ($res in $responses) continue } } - catch + catch { # If there is no central PR for the PR number, log error and skip. LogError "Get-GitHubPullRequests failed with exception:`n$_" @@ -107,15 +108,15 @@ foreach ($res in $responses) LogDebug "The branch $branch last commit date [ $commitDate ] is newer than the date $LastCommitOlderThan. Skipping." continue } - + LogDebug "Branch [ $branchName ] in repo [ $RepoId ] has a last commit date [ $commitDate ] that is older than $LastCommitOlderThan. " } catch { LogError "Get-GithubReferenceCommitDate failed with exception:`n$_" exit 1 } - } - + } + try { if ($PSCmdlet.ShouldProcess("[ $branchName ] in [ $RepoId ]", "Deleting branches on cleanup script")) { Remove-GitHubSourceReferences -RepoId $RepoId -Ref $branch -AuthToken $AuthToken From 04b463d9e9c69777de6b93ef53658358a93b2b5b Mon Sep 17 00:00:00 2001 From: Richard Park <51494936+richardpark-msft@users.noreply.github.com> Date: Thu, 4 May 2023 15:40:34 -0700 Subject: [PATCH 22/50] [azeventhubs] Latest start position can also be inclusive (ie, get the latest message) (#20744) --- .../azeventhubs/consumer_client_unit_test.go | 30 +++++++++++-------- sdk/messaging/azeventhubs/partition_client.go | 20 ++++++------- .../azeventhubs/processor_unit_test.go | 2 +- 3 files changed, 28 insertions(+), 24 deletions(-) diff --git a/sdk/messaging/azeventhubs/consumer_client_unit_test.go b/sdk/messaging/azeventhubs/consumer_client_unit_test.go index 900e46d4d37f..1d91ff164280 100644 --- a/sdk/messaging/azeventhubs/consumer_client_unit_test.go +++ b/sdk/messaging/azeventhubs/consumer_client_unit_test.go @@ -49,42 +49,46 @@ func TestUnitNewConsumerClient(t *testing.T) { func TestUnit_getOffsetExpression(t *testing.T) { t.Run("Valid", func(t *testing.T) { - 
expr, err := getOffsetExpression(StartPosition{}) + expr, err := getStartExpression(StartPosition{}) require.NoError(t, err) require.Equal(t, "amqp.annotation.x-opt-offset > '@latest'", expr) - expr, err = getOffsetExpression(StartPosition{Earliest: to.Ptr(true)}) + expr, err = getStartExpression(StartPosition{Earliest: to.Ptr(true)}) require.NoError(t, err) require.Equal(t, "amqp.annotation.x-opt-offset > '-1'", expr) - expr, err = getOffsetExpression(StartPosition{Latest: to.Ptr(true)}) + expr, err = getStartExpression(StartPosition{Latest: to.Ptr(true)}) require.NoError(t, err) require.Equal(t, "amqp.annotation.x-opt-offset > '@latest'", expr) - expr, err = getOffsetExpression(StartPosition{Offset: to.Ptr(int64(101))}) + expr, err = getStartExpression(StartPosition{Latest: to.Ptr(true), Inclusive: true}) + require.NoError(t, err) + require.Equal(t, "amqp.annotation.x-opt-offset >= '@latest'", expr) + + expr, err = getStartExpression(StartPosition{Offset: to.Ptr(int64(101))}) require.NoError(t, err) require.Equal(t, "amqp.annotation.x-opt-offset > '101'", expr) - expr, err = getOffsetExpression(StartPosition{Offset: to.Ptr(int64(101)), Inclusive: true}) + expr, err = getStartExpression(StartPosition{Offset: to.Ptr(int64(101)), Inclusive: true}) require.NoError(t, err) require.Equal(t, "amqp.annotation.x-opt-offset >= '101'", expr) - expr, err = getOffsetExpression(StartPosition{SequenceNumber: to.Ptr(int64(202))}) + expr, err = getStartExpression(StartPosition{SequenceNumber: to.Ptr(int64(202))}) require.NoError(t, err) require.Equal(t, "amqp.annotation.x-opt-sequence-number > '202'", expr) - expr, err = getOffsetExpression(StartPosition{SequenceNumber: to.Ptr(int64(202)), Inclusive: true}) + expr, err = getStartExpression(StartPosition{SequenceNumber: to.Ptr(int64(202)), Inclusive: true}) require.NoError(t, err) require.Equal(t, "amqp.annotation.x-opt-sequence-number >= '202'", expr) enqueueTime, err := time.Parse(time.RFC3339, "2020-01-01T01:02:03Z") 
require.NoError(t, err) - expr, err = getOffsetExpression(StartPosition{EnqueuedTime: &enqueueTime}) + expr, err = getStartExpression(StartPosition{EnqueuedTime: &enqueueTime}) require.NoError(t, err) require.Equal(t, "amqp.annotation.x-opt-enqueued-time > '1577840523000'", expr) - expr, err = getOffsetExpression(StartPosition{EnqueuedTime: &enqueueTime, Inclusive: true}) + expr, err = getStartExpression(StartPosition{EnqueuedTime: &enqueueTime, Inclusive: true}) require.NoError(t, err) require.Equal(t, "amqp.annotation.x-opt-enqueued-time >= '1577840523000'", expr) }) @@ -93,28 +97,28 @@ func TestUnit_getOffsetExpression(t *testing.T) { enqueueTime, err := time.Parse(time.RFC3339, "2020-01-01T01:02:03Z") require.NoError(t, err) - expr, err := getOffsetExpression(StartPosition{ + expr, err := getStartExpression(StartPosition{ EnqueuedTime: &enqueueTime, Offset: to.Ptr[int64](101), }) require.EqualError(t, err, "only a single start point can be set: Earliest, EnqueuedTime, Latest, Offset, or SequenceNumber") require.Empty(t, expr) - expr, err = getOffsetExpression(StartPosition{ + expr, err = getStartExpression(StartPosition{ Offset: to.Ptr[int64](202), Latest: to.Ptr(true), }) require.EqualError(t, err, "only a single start point can be set: Earliest, EnqueuedTime, Latest, Offset, or SequenceNumber") require.Empty(t, expr) - expr, err = getOffsetExpression(StartPosition{ + expr, err = getStartExpression(StartPosition{ Latest: to.Ptr(true), SequenceNumber: to.Ptr[int64](202), }) require.EqualError(t, err, "only a single start point can be set: Earliest, EnqueuedTime, Latest, Offset, or SequenceNumber") require.Empty(t, expr) - expr, err = getOffsetExpression(StartPosition{ + expr, err = getStartExpression(StartPosition{ SequenceNumber: to.Ptr[int64](202), Earliest: to.Ptr(true), }) diff --git a/sdk/messaging/azeventhubs/partition_client.go b/sdk/messaging/azeventhubs/partition_client.go index 8c5ab53fe8a3..654099f53016 100644 --- 
a/sdk/messaging/azeventhubs/partition_client.go +++ b/sdk/messaging/azeventhubs/partition_client.go @@ -184,7 +184,7 @@ func (pc *PartitionClient) ReceiveEvents(ctx context.Context, count int, options numEvents := len(events) lastSequenceNumber := events[numEvents-1].SequenceNumber - pc.offsetExpression = formatOffsetExpressionForSequence(">", lastSequenceNumber) + pc.offsetExpression = formatStartExpressionForSequence(">", lastSequenceNumber) log.Writef(EventConsumer, "%d Events received, moving sequence to %d", numEvents, lastSequenceNumber) return events, nil } @@ -274,7 +274,7 @@ func newPartitionClient(args partitionClientArgs, options *PartitionClientOption options = &PartitionClientOptions{} } - offsetExpr, err := getOffsetExpression(options.StartPosition) + offsetExpr, err := getStartExpression(options.StartPosition) if err != nil { return nil, err @@ -317,11 +317,11 @@ func getAllPrefetched(receiver amqpwrap.AMQPReceiver, max int) []*amqp.Message { return messages } -func getOffsetExpression(startPosition StartPosition) (string, error) { - lt := ">" +func getStartExpression(startPosition StartPosition) (string, error) { + gt := ">" if startPosition.Inclusive { - lt = ">=" + gt = ">=" } var errMultipleFieldsSet = errors.New("only a single start point can be set: Earliest, EnqueuedTime, Latest, Offset, or SequenceNumber") @@ -330,7 +330,7 @@ func getOffsetExpression(startPosition StartPosition) (string, error) { if startPosition.EnqueuedTime != nil { // time-based, non-inclusive - offsetExpr = fmt.Sprintf("amqp.annotation.x-opt-enqueued-time %s '%d'", lt, startPosition.EnqueuedTime.UnixMilli()) + offsetExpr = fmt.Sprintf("amqp.annotation.x-opt-enqueued-time %s '%d'", gt, startPosition.EnqueuedTime.UnixMilli()) } if startPosition.Offset != nil { @@ -340,7 +340,7 @@ func getOffsetExpression(startPosition StartPosition) (string, error) { return "", errMultipleFieldsSet } - offsetExpr = fmt.Sprintf("amqp.annotation.x-opt-offset %s '%d'", lt, 
*startPosition.Offset) + offsetExpr = fmt.Sprintf("amqp.annotation.x-opt-offset %s '%d'", gt, *startPosition.Offset) } if startPosition.Latest != nil && *startPosition.Latest { @@ -348,7 +348,7 @@ func getOffsetExpression(startPosition StartPosition) (string, error) { return "", errMultipleFieldsSet } - offsetExpr = "amqp.annotation.x-opt-offset > '@latest'" + offsetExpr = fmt.Sprintf("amqp.annotation.x-opt-offset %s '@latest'", gt) } if startPosition.SequenceNumber != nil { @@ -356,7 +356,7 @@ func getOffsetExpression(startPosition StartPosition) (string, error) { return "", errMultipleFieldsSet } - offsetExpr = formatOffsetExpressionForSequence(lt, *startPosition.SequenceNumber) + offsetExpr = formatStartExpressionForSequence(gt, *startPosition.SequenceNumber) } if startPosition.Earliest != nil && *startPosition.Earliest { @@ -375,6 +375,6 @@ func getOffsetExpression(startPosition StartPosition) (string, error) { return "amqp.annotation.x-opt-offset > '@latest'", nil } -func formatOffsetExpressionForSequence(op string, sequenceNumber int64) string { +func formatStartExpressionForSequence(op string, sequenceNumber int64) string { return fmt.Sprintf("amqp.annotation.x-opt-sequence-number %s '%d'", op, sequenceNumber) } diff --git a/sdk/messaging/azeventhubs/processor_unit_test.go b/sdk/messaging/azeventhubs/processor_unit_test.go index c21b31e8c77e..ff257b58d408 100644 --- a/sdk/messaging/azeventhubs/processor_unit_test.go +++ b/sdk/messaging/azeventhubs/processor_unit_test.go @@ -236,7 +236,7 @@ func TestUnit_Processor_Run_startPosition(t *testing.T) { fakeConsumerClient := simpleFakeConsumerClient() fakeConsumerClient.newPartitionClientFn = func(partitionID string, options *PartitionClientOptions) (*PartitionClient, error) { - offsetExpr, err := getOffsetExpression(options.StartPosition) + offsetExpr, err := getStartExpression(options.StartPosition) require.NoError(t, err) return newFakePartitionClient(partitionID, offsetExpr), nil From 
8849196dfa482cef8345590dc7bd12c8ad3c5de1 Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Fri, 5 May 2023 11:09:05 -0700 Subject: [PATCH 23/50] Update GitHubEventProcessor version and remove pull_request_review procesing (#20751) Co-authored-by: James Suplizio --- .github/workflows/event-processor.yml | 4 +--- .github/workflows/scheduled-event-processor.yml | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/event-processor.yml b/.github/workflows/event-processor.yml index f974387634c1..befc1d9a2e31 100644 --- a/.github/workflows/event-processor.yml +++ b/.github/workflows/event-processor.yml @@ -11,8 +11,6 @@ on: # pull request merged is the closed event with github.event.pull_request.merged = true pull_request_target: types: [closed, labeled, opened, reopened, review_requested, synchronize, unlabeled] - pull_request_review: - types: [submitted] # This removes all unnecessary permissions, the ones needed will be set below. 
# https://docs.github.com/en/actions/security-guides/automatic-token-authentication#permissions-for-the-github_token @@ -57,7 +55,7 @@ jobs: run: > dotnet tool install Azure.Sdk.Tools.GitHubEventProcessor - --version 1.0.0-dev.20230422.1 + --version 1.0.0-dev.20230505.2 --add-source https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json --global shell: bash diff --git a/.github/workflows/scheduled-event-processor.yml b/.github/workflows/scheduled-event-processor.yml index 8b79f66e7d2c..53181c5a418c 100644 --- a/.github/workflows/scheduled-event-processor.yml +++ b/.github/workflows/scheduled-event-processor.yml @@ -34,7 +34,7 @@ jobs: run: > dotnet tool install Azure.Sdk.Tools.GitHubEventProcessor - --version 1.0.0-dev.20230422.1 + --version 1.0.0-dev.20230505.2 --add-source https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-net/nuget/v3/index.json --global shell: bash From 27f5ee0c8751f7a118a1b101f0cd99d328df4331 Mon Sep 17 00:00:00 2001 From: Charles Lowell <10964656+chlowell@users.noreply.github.com> Date: Fri, 5 May 2023 15:41:42 -0700 Subject: [PATCH 24/50] Rename DisableAuthorityValidationAndInstanceDiscovery (#20746) --- sdk/azidentity/CHANGELOG.md | 1 - sdk/azidentity/client_assertion_credential.go | 13 ++++++------- .../client_assertion_credential_test.go | 5 +---- .../client_certificate_credential.go | 13 ++++++------- .../client_certificate_credential_test.go | 4 ++-- sdk/azidentity/client_secret_credential.go | 13 ++++++------- .../client_secret_credential_test.go | 4 ++-- sdk/azidentity/default_azure_credential.go | 19 +++++++++---------- sdk/azidentity/device_code_credential.go | 13 ++++++------- sdk/azidentity/device_code_credential_test.go | 9 +++++---- sdk/azidentity/environment_credential.go | 17 ++++++++--------- sdk/azidentity/environment_credential_test.go | 16 ++++++++-------- .../interactive_browser_credential.go | 13 ++++++------- .../interactive_browser_credential_test.go | 12 
++++++------ sdk/azidentity/on_behalf_of_credential.go | 13 ++++++------- .../username_password_credential.go | 13 ++++++------- .../username_password_credential_test.go | 4 ++-- sdk/azidentity/workload_identity.go | 13 ++++++------- sdk/azidentity/workload_identity_test.go | 10 +++++----- 19 files changed, 96 insertions(+), 109 deletions(-) diff --git a/sdk/azidentity/CHANGELOG.md b/sdk/azidentity/CHANGELOG.md index 57ff2c6509b0..85d1d4a7c200 100644 --- a/sdk/azidentity/CHANGELOG.md +++ b/sdk/azidentity/CHANGELOG.md @@ -6,7 +6,6 @@ ### Breaking Changes > These changes affect only code written against a beta version such as v1.3.0-beta.5 -* Renamed `DisableInstanceDiscovery` to `DisableAuthorityValidationAndInstanceDiscovery` * Renamed `NewOnBehalfOfCredentialFromCertificate` to `NewOnBehalfOfCredentialWithCertificate` * Renamed `NewOnBehalfOfCredentialFromSecret` to `NewOnBehalfOfCredentialWithSecret` diff --git a/sdk/azidentity/client_assertion_credential.go b/sdk/azidentity/client_assertion_credential.go index 7db693094ec8..d9d22996cd47 100644 --- a/sdk/azidentity/client_assertion_credential.go +++ b/sdk/azidentity/client_assertion_credential.go @@ -36,12 +36,11 @@ type ClientAssertionCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. 
- DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool } // NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. @@ -57,7 +56,7 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c return getAssertion(ctx) }, ) - c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableAuthorityValidationAndInstanceDiscovery)) + c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) if err != nil { return nil, err } diff --git a/sdk/azidentity/client_assertion_credential_test.go b/sdk/azidentity/client_assertion_credential_test.go index 3477d0a39d70..b6ac65566cea 100644 --- a/sdk/azidentity/client_assertion_credential_test.go +++ b/sdk/azidentity/client_assertion_credential_test.go @@ -100,10 +100,7 @@ func TestClientAssertionCredential_Live(t *testing.T) { func(context.Context) (string, error) { return getAssertion(certs[0], key) }, - &ClientAssertionCredentialOptions{ - ClientOptions: o, - DisableAuthorityValidationAndInstanceDiscovery: d, - }, + &ClientAssertionCredentialOptions{ClientOptions: o, DisableInstanceDiscovery: d}, ) if err != nil { t.Fatal(err) diff --git a/sdk/azidentity/client_certificate_credential.go b/sdk/azidentity/client_certificate_credential.go index acd9360b6ba5..804eba899ecf 100644 --- 
a/sdk/azidentity/client_certificate_credential.go +++ b/sdk/azidentity/client_certificate_credential.go @@ -29,12 +29,11 @@ type ClientCertificateCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. - DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. // Defaults to False. 
@@ -63,7 +62,7 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x if options.SendCertificateChain { o = append(o, confidential.WithX5C()) } - o = append(o, confidential.WithInstanceDiscovery(!options.DisableAuthorityValidationAndInstanceDiscovery)) + o = append(o, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) if err != nil { return nil, err diff --git a/sdk/azidentity/client_certificate_credential_test.go b/sdk/azidentity/client_certificate_credential_test.go index 02820f452234..0137aa7302b5 100644 --- a/sdk/azidentity/client_certificate_credential_test.go +++ b/sdk/azidentity/client_certificate_credential_test.go @@ -239,7 +239,7 @@ func TestClientCertificateCredential_Live(t *testing.T) { } o, stop := initRecording(t) defer stop() - opts := &ClientCertificateCredentialOptions{ClientOptions: o, DisableAuthorityValidationAndInstanceDiscovery: true} + opts := &ClientCertificateCredentialOptions{ClientOptions: o, DisableInstanceDiscovery: true} cred, err := NewClientCertificateCredential(liveSP.tenantID, liveSP.clientID, certs, key, opts) if err != nil { t.Fatalf("failed to construct credential: %v", err) @@ -265,7 +265,7 @@ func TestClientCertificateCredentialADFS_Live(t *testing.T) { o, stop := initRecording(t) defer stop() o.Cloud.ActiveDirectoryAuthorityHost = adfsAuthority - opts := &ClientCertificateCredentialOptions{ClientOptions: o, DisableAuthorityValidationAndInstanceDiscovery: true} + opts := &ClientCertificateCredentialOptions{ClientOptions: o, DisableInstanceDiscovery: true} cred, err := NewClientCertificateCredential("adfs", adfsLiveSP.clientID, certs, key, opts) if err != nil { t.Fatalf("failed to construct credential: %v", err) diff --git a/sdk/azidentity/client_secret_credential.go b/sdk/azidentity/client_secret_credential.go index 727910455909..dda21f6b88d6 100644 --- 
a/sdk/azidentity/client_secret_credential.go +++ b/sdk/azidentity/client_secret_credential.go @@ -24,12 +24,11 @@ type ClientSecretCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. - DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool } // ClientSecretCredential authenticates an application with a client secret. 
@@ -48,7 +47,7 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st return nil, err } c, err := getConfidentialClient( - clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableAuthorityValidationAndInstanceDiscovery), + clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery), ) if err != nil { return nil, err diff --git a/sdk/azidentity/client_secret_credential_test.go b/sdk/azidentity/client_secret_credential_test.go index e5a7abe1efc1..9e6b230522f6 100644 --- a/sdk/azidentity/client_secret_credential_test.go +++ b/sdk/azidentity/client_secret_credential_test.go @@ -49,7 +49,7 @@ func TestClientSecretCredential_Live(t *testing.T) { t.Run(name, func(t *testing.T) { opts, stop := initRecording(t) defer stop() - o := ClientSecretCredentialOptions{ClientOptions: opts, DisableAuthorityValidationAndInstanceDiscovery: disabledID} + o := ClientSecretCredentialOptions{ClientOptions: opts, DisableInstanceDiscovery: disabledID} cred, err := NewClientSecretCredential(liveSP.tenantID, liveSP.clientID, liveSP.secret, &o) if err != nil { t.Fatalf("failed to construct credential: %v", err) @@ -68,7 +68,7 @@ func TestClientSecretCredentialADFS_Live(t *testing.T) { opts, stop := initRecording(t) defer stop() opts.Cloud.ActiveDirectoryAuthorityHost = adfsAuthority - o := ClientSecretCredentialOptions{ClientOptions: opts, DisableAuthorityValidationAndInstanceDiscovery: true} + o := ClientSecretCredentialOptions{ClientOptions: opts, DisableInstanceDiscovery: true} cred, err := NewClientSecretCredential("adfs", adfsLiveSP.clientID, adfsLiveSP.secret, &o) if err != nil { t.Fatalf("failed to construct credential: %v", err) diff --git a/sdk/azidentity/default_azure_credential.go b/sdk/azidentity/default_azure_credential.go index c3f580dc560b..1e3efdc97a96 100644 --- a/sdk/azidentity/default_azure_credential.go +++ 
b/sdk/azidentity/default_azure_credential.go @@ -27,12 +27,11 @@ type DefaultAzureCredentialOptions struct { // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. - DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool // TenantID identifies the tenant the Azure CLI should authenticate in. // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI. 
TenantID string @@ -73,9 +72,9 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default } envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ - ClientOptions: options.ClientOptions, - DisableAuthorityValidationAndInstanceDiscovery: options.DisableAuthorityValidationAndInstanceDiscovery, - additionallyAllowedTenants: additionalTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + additionallyAllowedTenants: additionalTenants, }) if err == nil { creds = append(creds, envCred) @@ -88,7 +87,7 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{ AdditionallyAllowedTenants: additionalTenants, ClientOptions: options.ClientOptions, - DisableAuthorityValidationAndInstanceDiscovery: options.DisableAuthorityValidationAndInstanceDiscovery, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, }) if err == nil { creds = append(creds, wic) diff --git a/sdk/azidentity/device_code_credential.go b/sdk/azidentity/device_code_credential.go index a7c3376631c1..108e83c43aee 100644 --- a/sdk/azidentity/device_code_credential.go +++ b/sdk/azidentity/device_code_credential.go @@ -27,12 +27,11 @@ type DeviceCodeCredentialOptions struct { // ClientID is the ID of the application users will authenticate to. // Defaults to the ID of an Azure development application. ClientID string - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. 
- DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant // applications. @@ -89,7 +88,7 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC } cp.init() c, err := getPublicClient( - cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableAuthorityValidationAndInstanceDiscovery), + cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery), ) if err != nil { return nil, err diff --git a/sdk/azidentity/device_code_credential_test.go b/sdk/azidentity/device_code_credential_test.go index 0f91343a8042..de2eb7fe5962 100644 --- a/sdk/azidentity/device_code_credential_test.go +++ b/sdk/azidentity/device_code_credential_test.go @@ -99,7 +99,7 @@ func TestDeviceCodeCredential_Live(t *testing.T) { }, { desc: "instance discovery disabled", - opts: DeviceCodeCredentialOptions{DisableAuthorityValidationAndInstanceDiscovery: true, TenantID: liveSP.tenantID}, + opts: DeviceCodeCredentialOptions{DisableInstanceDiscovery: true, TenantID: liveSP.tenantID}, }, { desc: "optional tenant", @@ -133,9 +133,10 @@ func TestDeviceCodeCredentialADFS_Live(t *testing.T) { defer stop() o.Cloud.ActiveDirectoryAuthorityHost = adfsAuthority opts := DeviceCodeCredentialOptions{ - ClientID: adfsLiveUser.clientID, - ClientOptions: o, 
DisableAuthorityValidationAndInstanceDiscovery: true, - TenantID: "adfs", + ClientID: adfsLiveUser.clientID, + ClientOptions: o, + DisableInstanceDiscovery: true, + TenantID: "adfs", } if recording.GetRecordMode() == recording.PlaybackMode { opts.UserPrompt = func(ctx context.Context, m DeviceCodeMessage) error { return nil } diff --git a/sdk/azidentity/environment_credential.go b/sdk/azidentity/environment_credential.go index cefb1dd4da2a..7ecd928e0245 100644 --- a/sdk/azidentity/environment_credential.go +++ b/sdk/azidentity/environment_credential.go @@ -24,12 +24,11 @@ const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN" type EnvironmentCredentialOptions struct { azcore.ClientOptions - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. - DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool // additionallyAllowedTenants is used only by NewDefaultAzureCredential() to enable that constructor's explicit // option to override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS. Applications using EnvironmentCredential // directly should set that variable instead. 
This field should remain unexported to preserve this credential's @@ -102,7 +101,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme o := &ClientSecretCredentialOptions{ AdditionallyAllowedTenants: additionalTenants, ClientOptions: options.ClientOptions, - DisableAuthorityValidationAndInstanceDiscovery: options.DisableAuthorityValidationAndInstanceDiscovery, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o) if err != nil { @@ -127,7 +126,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme o := &ClientCertificateCredentialOptions{ AdditionallyAllowedTenants: additionalTenants, ClientOptions: options.ClientOptions, - DisableAuthorityValidationAndInstanceDiscovery: options.DisableAuthorityValidationAndInstanceDiscovery, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } if v, ok := os.LookupEnv(envVarSendCertChain); ok { o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true" @@ -144,7 +143,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme o := &UsernamePasswordCredentialOptions{ AdditionallyAllowedTenants: additionalTenants, ClientOptions: options.ClientOptions, - DisableAuthorityValidationAndInstanceDiscovery: options.DisableAuthorityValidationAndInstanceDiscovery, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o) if err != nil { diff --git a/sdk/azidentity/environment_credential_test.go b/sdk/azidentity/environment_credential_test.go index e8035efe6f8e..061acb16d33a 100644 --- a/sdk/azidentity/environment_credential_test.go +++ b/sdk/azidentity/environment_credential_test.go @@ -248,8 +248,8 @@ func TestEnvironmentCredential_ClientSecretLive(t *testing.T) { opts, stop := initRecording(t) defer stop() cred, err := 
NewEnvironmentCredential(&EnvironmentCredentialOptions{ - ClientOptions: opts, - DisableAuthorityValidationAndInstanceDiscovery: disabledID, + ClientOptions: opts, + DisableInstanceDiscovery: disabledID, }) if err != nil { t.Fatalf("failed to construct credential: %v", err) @@ -275,8 +275,8 @@ func TestEnvironmentCredentialADFS_ClientSecretLive(t *testing.T) { opts, stop := initRecording(t) defer stop() cred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ - ClientOptions: opts, - DisableAuthorityValidationAndInstanceDiscovery: true, + ClientOptions: opts, + DisableInstanceDiscovery: true, }) if err != nil { t.Fatalf("failed to construct credential: %v", err) @@ -330,8 +330,8 @@ func TestEnvironmentCredential_UserPasswordLive(t *testing.T) { opts, stop := initRecording(t) defer stop() cred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ - ClientOptions: opts, - DisableAuthorityValidationAndInstanceDiscovery: disabledID, + ClientOptions: opts, + DisableInstanceDiscovery: disabledID, }) if err != nil { t.Fatalf("failed to construct credential: %v", err) @@ -358,8 +358,8 @@ func TestEnvironmentCredentialADFS_UserPasswordLive(t *testing.T) { opts, stop := initRecording(t) defer stop() cred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ - ClientOptions: opts, - DisableAuthorityValidationAndInstanceDiscovery: true, + ClientOptions: opts, + DisableInstanceDiscovery: true, }) if err != nil { t.Fatalf("failed to construct credential: %v", err) diff --git a/sdk/azidentity/interactive_browser_credential.go b/sdk/azidentity/interactive_browser_credential.go index 17cca645c7e6..4868d22c3e1f 100644 --- a/sdk/azidentity/interactive_browser_credential.go +++ b/sdk/azidentity/interactive_browser_credential.go @@ -27,12 +27,11 @@ type InteractiveBrowserCredentialOptions struct { // Defaults to the ID of an Azure development application. 
ClientID string - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. - DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. 
LoginHint string @@ -70,7 +69,7 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption cp = *options } cp.init() - c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableAuthorityValidationAndInstanceDiscovery)) + c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery)) if err != nil { return nil, err } diff --git a/sdk/azidentity/interactive_browser_credential_test.go b/sdk/azidentity/interactive_browser_credential_test.go index e4c5d2b7970f..1f9c214b1c68 100644 --- a/sdk/azidentity/interactive_browser_credential_test.go +++ b/sdk/azidentity/interactive_browser_credential_test.go @@ -111,7 +111,7 @@ func TestInteractiveBrowserCredential_Live(t *testing.T) { PerCallPolicies: []policy.Policy{ &instanceDiscoveryPolicy{t}, }}, - DisableAuthorityValidationAndInstanceDiscovery: true, + DisableInstanceDiscovery: true, }) if err != nil { t.Fatal(err) @@ -135,11 +135,11 @@ func TestInteractiveBrowserCredentialADFS_Live(t *testing.T) { clientOptions := policy.ClientOptions{Cloud: cloudConfig} cred, err := NewInteractiveBrowserCredential(&InteractiveBrowserCredentialOptions{ - ClientOptions: clientOptions, - ClientID: adfsLiveUser.clientID, - DisableAuthorityValidationAndInstanceDiscovery: true, - RedirectURL: url, - TenantID: "adfs", + ClientOptions: clientOptions, + ClientID: adfsLiveUser.clientID, + DisableInstanceDiscovery: true, + RedirectURL: url, + TenantID: "adfs", }) if err != nil { t.Fatal(err) diff --git a/sdk/azidentity/on_behalf_of_credential.go b/sdk/azidentity/on_behalf_of_credential.go index a6e9f3ec6f2c..3e173f47d26d 100644 --- a/sdk/azidentity/on_behalf_of_credential.go +++ b/sdk/azidentity/on_behalf_of_credential.go @@ -38,12 +38,11 @@ type OnBehalfOfCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is 
registered. AdditionallyAllowedTenants []string - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. - DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. // This setting controls whether the credential sends the public certificate chain in the x5c header of each // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. @@ -77,7 +76,7 @@ func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred conf if options.SendCertificateChain { opts = append(opts, confidential.WithX5C()) } - opts = append(opts, confidential.WithInstanceDiscovery(!options.DisableAuthorityValidationAndInstanceDiscovery)) + opts = append(opts, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, opts...) 
if err != nil { return nil, err diff --git a/sdk/azidentity/username_password_credential.go b/sdk/azidentity/username_password_credential.go index b6bbf7d92828..8e652e33ff6f 100644 --- a/sdk/azidentity/username_password_credential.go +++ b/sdk/azidentity/username_password_credential.go @@ -24,12 +24,11 @@ type UsernamePasswordCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. - DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool } // UsernamePasswordCredential authenticates a user with a password. 
Microsoft doesn't recommend this kind of authentication, @@ -49,7 +48,7 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st if options == nil { options = &UsernamePasswordCredentialOptions{} } - c, err := getPublicClient(clientID, tenantID, &options.ClientOptions, public.WithInstanceDiscovery(!options.DisableAuthorityValidationAndInstanceDiscovery)) + c, err := getPublicClient(clientID, tenantID, &options.ClientOptions, public.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) if err != nil { return nil, err } diff --git a/sdk/azidentity/username_password_credential_test.go b/sdk/azidentity/username_password_credential_test.go index 82c68e50e058..304e24d03cc0 100644 --- a/sdk/azidentity/username_password_credential_test.go +++ b/sdk/azidentity/username_password_credential_test.go @@ -47,7 +47,7 @@ func TestUsernamePasswordCredential_Live(t *testing.T) { t.Run(name, func(t *testing.T) { o, stop := initRecording(t) defer stop() - opts := UsernamePasswordCredentialOptions{ClientOptions: o, DisableAuthorityValidationAndInstanceDiscovery: disabledID} + opts := UsernamePasswordCredentialOptions{ClientOptions: o, DisableInstanceDiscovery: disabledID} cred, err := NewUsernamePasswordCredential(liveUser.tenantID, developerSignOnClientID, liveUser.username, liveUser.password, &opts) if err != nil { t.Fatalf("Unable to create credential. Received: %v", err) @@ -66,7 +66,7 @@ func TestUsernamePasswordCredentialADFS_Live(t *testing.T) { o, stop := initRecording(t) o.Cloud.ActiveDirectoryAuthorityHost = adfsAuthority defer stop() - opts := UsernamePasswordCredentialOptions{ClientOptions: o, DisableAuthorityValidationAndInstanceDiscovery: true} + opts := UsernamePasswordCredentialOptions{ClientOptions: o, DisableInstanceDiscovery: true} cred, err := NewUsernamePasswordCredential("adfs", adfsLiveUser.clientID, adfsLiveUser.username, adfsLiveUser.password, &opts) if err != nil { t.Fatalf("Unable to create credential. 
Received: %v", err) diff --git a/sdk/azidentity/workload_identity.go b/sdk/azidentity/workload_identity.go index f1b5f693821f..7bfb3436760d 100644 --- a/sdk/azidentity/workload_identity.go +++ b/sdk/azidentity/workload_identity.go @@ -40,12 +40,11 @@ type WorkloadIdentityCredentialOptions struct { AdditionallyAllowedTenants []string // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. ClientID string - // DisableAuthorityValidationAndInstanceDiscovery should be set true only by applications authenticating - // in disconnected clouds, or private clouds such as Azure Stack. It determines whether the credential - // requests Azure AD instance metadata from https://login.microsoft.com before authenticating. Setting - // this to true will skip this request, making the application responsible for ensuring the configured - // authority is valid and trustworthy. - DisableAuthorityValidationAndInstanceDiscovery bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or + // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making + // the application responsible for ensuring the configured authority is valid and trustworthy. + DisableInstanceDiscovery bool // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. TenantID string // TokenFilePath is the path a file containing the workload identity token. 
Defaults to the value of the @@ -82,7 +81,7 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( caco := ClientAssertionCredentialOptions{ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, ClientOptions: options.ClientOptions, - DisableAuthorityValidationAndInstanceDiscovery: options.DisableAuthorityValidationAndInstanceDiscovery, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } cred, err := NewClientAssertionCredential(tenantID, clientID, w.getAssertion, &caco) if err != nil { diff --git a/sdk/azidentity/workload_identity_test.go b/sdk/azidentity/workload_identity_test.go index 83338315467b..121e60a0e389 100644 --- a/sdk/azidentity/workload_identity_test.go +++ b/sdk/azidentity/workload_identity_test.go @@ -71,11 +71,11 @@ func TestWorkloadIdentityCredential_Live(t *testing.T) { co, stop := initRecording(t) defer stop() cred, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{ - ClientID: liveSP.clientID, - ClientOptions: co, - DisableAuthorityValidationAndInstanceDiscovery: b, - TenantID: liveSP.tenantID, - TokenFilePath: f, + ClientID: liveSP.clientID, + ClientOptions: co, + DisableInstanceDiscovery: b, + TenantID: liveSP.tenantID, + TokenFilePath: f, }) if err != nil { t.Fatal(err) From 2eec70782a7d47e1dd81e64f6764ab2747494c5f Mon Sep 17 00:00:00 2001 From: Peng Jiahui <46921893+Alancere@users.noreply.github.com> Date: Sat, 6 May 2023 09:55:34 +0800 Subject: [PATCH 25/50] fix (#20707) --- eng/tools/generator/cmd/v2/common/fileProcessor.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/eng/tools/generator/cmd/v2/common/fileProcessor.go b/eng/tools/generator/cmd/v2/common/fileProcessor.go index 6b8730746ae6..1636f36bbe5c 100644 --- a/eng/tools/generator/cmd/v2/common/fileProcessor.go +++ b/eng/tools/generator/cmd/v2/common/fileProcessor.go @@ -94,9 +94,9 @@ func ReadV2ModuleNameToGetNamespace(path string) (map[string][]PackageInfo, erro return nil, 
fmt.Errorf("last `track2` section does not properly end") } - s := strings.ReplaceAll(path, "\\", "/") - s1 := strings.Split(s, "/") - specName := s1[len(s1)-3] + _, after, _ := strings.Cut(strings.ReplaceAll(path, "\\", "/"), "specification") + before, _, _ := strings.Cut(after, "resource-manager") + specName := strings.Trim(before, "/") for i := range start { // get the content of the `track2` section From 22db2d46da8c537170bd954e78fe0296831530ca Mon Sep 17 00:00:00 2001 From: Sourav Gupta <98318303+souravgupta-msft@users.noreply.github.com> Date: Mon, 8 May 2023 10:20:33 +0530 Subject: [PATCH 26/50] AzFile (#20739) --- eng/config.json | 4 + sdk/storage/azfile/CHANGELOG.md | 7 + sdk/storage/azfile/LICENSE.txt | 21 + sdk/storage/azfile/README.md | 266 ++ sdk/storage/azfile/assets.json | 6 + sdk/storage/azfile/ci.yml | 33 + sdk/storage/azfile/directory/client.go | 205 ++ sdk/storage/azfile/directory/client_test.go | 1117 ++++++ sdk/storage/azfile/directory/constants.go | 24 + sdk/storage/azfile/directory/examples_test.go | 193 + sdk/storage/azfile/directory/models.go | 255 ++ sdk/storage/azfile/directory/responses.go | 39 + sdk/storage/azfile/doc.go | 229 ++ sdk/storage/azfile/file/chunkwriting.go | 189 + sdk/storage/azfile/file/client.go | 505 +++ sdk/storage/azfile/file/client_test.go | 3121 +++++++++++++++++ sdk/storage/azfile/file/constants.go | 78 + sdk/storage/azfile/file/examples_test.go | 650 ++++ sdk/storage/azfile/file/mmf_unix.go | 38 + sdk/storage/azfile/file/mmf_windows.go | 56 + sdk/storage/azfile/file/models.go | 743 ++++ sdk/storage/azfile/file/responses.go | 93 + sdk/storage/azfile/file/retry_reader.go | 186 + sdk/storage/azfile/fileerror/error_codes.go | 107 + sdk/storage/azfile/go.mod | 28 + sdk/storage/azfile/go.sum | 46 + sdk/storage/azfile/internal/base/clients.go | 60 + .../azfile/internal/exported/access_policy.go | 62 + .../exported/copy_file_smb_options.go | 96 + .../azfile/internal/exported/exported.go | 33 + 
.../internal/exported/file_permissions.go | 32 + .../azfile/internal/exported/log_events.go | 17 + .../exported/shared_key_credential.go | 218 ++ .../azfile/internal/exported/smb_property.go | 98 + .../exported/transfer_validation_option.go | 28 + .../azfile/internal/exported/version.go | 12 + .../azfile/internal/generated/autorest.md | 309 ++ .../azfile/internal/generated/build.go | 10 + .../internal/generated/directory_client.go | 22 + .../azfile/internal/generated/file_client.go | 17 + .../azfile/internal/generated/models.go | 25 + .../internal/generated/service_client.go | 17 + .../azfile/internal/generated/share_client.go | 17 + .../azfile/internal/generated/zz_constants.go | 342 ++ .../internal/generated/zz_directory_client.go | 766 ++++ .../internal/generated/zz_file_client.go | 1826 ++++++++++ .../azfile/internal/generated/zz_models.go | 932 +++++ .../internal/generated/zz_models_serde.go | 344 ++ .../internal/generated/zz_response_types.go | 1189 +++++++ .../internal/generated/zz_service_client.go | 195 + .../internal/generated/zz_share_client.go | 1437 ++++++++ .../internal/generated/zz_time_rfc1123.go | 43 + .../internal/generated/zz_time_rfc3339.go | 59 + .../internal/generated/zz_xml_helper.go | 41 + .../azfile/internal/shared/batch_transfer.go | 77 + .../azfile/internal/shared/bytes_writer.go | 30 + .../internal/shared/bytes_writer_test.go | 37 + .../azfile/internal/shared/section_writer.go | 53 + .../internal/shared/section_writer_test.go | 98 + sdk/storage/azfile/internal/shared/shared.go | 209 ++ .../azfile/internal/shared/shared_test.go | 95 + .../internal/testcommon/clients_auth.go | 224 ++ .../azfile/internal/testcommon/common.go | 117 + sdk/storage/azfile/lease/client_test.go | 633 ++++ sdk/storage/azfile/lease/constants.go | 51 + sdk/storage/azfile/lease/examples_test.go | 101 + sdk/storage/azfile/lease/file_client.go | 103 + sdk/storage/azfile/lease/models.go | 147 + sdk/storage/azfile/lease/responses.go | 36 + 
sdk/storage/azfile/lease/share_client.go | 116 + sdk/storage/azfile/log.go | 16 + sdk/storage/azfile/sas/account.go | 183 + sdk/storage/azfile/sas/account_test.go | 124 + sdk/storage/azfile/sas/query_params.go | 339 ++ sdk/storage/azfile/sas/query_params_test.go | 211 ++ sdk/storage/azfile/sas/service.go | 227 ++ sdk/storage/azfile/sas/service_test.go | 147 + sdk/storage/azfile/sas/url_parts.go | 147 + sdk/storage/azfile/sas/url_parts_test.go | 75 + sdk/storage/azfile/service/client.go | 214 ++ sdk/storage/azfile/service/client_test.go | 454 +++ sdk/storage/azfile/service/constants.go | 37 + sdk/storage/azfile/service/examples_test.go | 308 ++ sdk/storage/azfile/service/models.go | 171 + sdk/storage/azfile/service/responses.go | 30 + sdk/storage/azfile/share/client.go | 258 ++ sdk/storage/azfile/share/client_test.go | 1460 ++++++++ sdk/storage/azfile/share/constants.go | 50 + sdk/storage/azfile/share/examples_test.go | 464 +++ sdk/storage/azfile/share/models.go | 312 ++ sdk/storage/azfile/share/responses.go | 45 + sdk/storage/azfile/test-resources.json | 579 +++ 92 files changed, 24464 insertions(+) create mode 100644 sdk/storage/azfile/CHANGELOG.md create mode 100644 sdk/storage/azfile/LICENSE.txt create mode 100644 sdk/storage/azfile/README.md create mode 100644 sdk/storage/azfile/assets.json create mode 100644 sdk/storage/azfile/ci.yml create mode 100644 sdk/storage/azfile/directory/client.go create mode 100644 sdk/storage/azfile/directory/client_test.go create mode 100644 sdk/storage/azfile/directory/constants.go create mode 100644 sdk/storage/azfile/directory/examples_test.go create mode 100644 sdk/storage/azfile/directory/models.go create mode 100644 sdk/storage/azfile/directory/responses.go create mode 100644 sdk/storage/azfile/doc.go create mode 100644 sdk/storage/azfile/file/chunkwriting.go create mode 100644 sdk/storage/azfile/file/client.go create mode 100644 sdk/storage/azfile/file/client_test.go create mode 100644 sdk/storage/azfile/file/constants.go 
create mode 100644 sdk/storage/azfile/file/examples_test.go create mode 100644 sdk/storage/azfile/file/mmf_unix.go create mode 100644 sdk/storage/azfile/file/mmf_windows.go create mode 100644 sdk/storage/azfile/file/models.go create mode 100644 sdk/storage/azfile/file/responses.go create mode 100644 sdk/storage/azfile/file/retry_reader.go create mode 100644 sdk/storage/azfile/fileerror/error_codes.go create mode 100644 sdk/storage/azfile/go.mod create mode 100644 sdk/storage/azfile/go.sum create mode 100644 sdk/storage/azfile/internal/base/clients.go create mode 100644 sdk/storage/azfile/internal/exported/access_policy.go create mode 100644 sdk/storage/azfile/internal/exported/copy_file_smb_options.go create mode 100644 sdk/storage/azfile/internal/exported/exported.go create mode 100644 sdk/storage/azfile/internal/exported/file_permissions.go create mode 100644 sdk/storage/azfile/internal/exported/log_events.go create mode 100644 sdk/storage/azfile/internal/exported/shared_key_credential.go create mode 100644 sdk/storage/azfile/internal/exported/smb_property.go create mode 100644 sdk/storage/azfile/internal/exported/transfer_validation_option.go create mode 100644 sdk/storage/azfile/internal/exported/version.go create mode 100644 sdk/storage/azfile/internal/generated/autorest.md create mode 100644 sdk/storage/azfile/internal/generated/build.go create mode 100644 sdk/storage/azfile/internal/generated/directory_client.go create mode 100644 sdk/storage/azfile/internal/generated/file_client.go create mode 100644 sdk/storage/azfile/internal/generated/models.go create mode 100644 sdk/storage/azfile/internal/generated/service_client.go create mode 100644 sdk/storage/azfile/internal/generated/share_client.go create mode 100644 sdk/storage/azfile/internal/generated/zz_constants.go create mode 100644 sdk/storage/azfile/internal/generated/zz_directory_client.go create mode 100644 sdk/storage/azfile/internal/generated/zz_file_client.go create mode 100644 
sdk/storage/azfile/internal/generated/zz_models.go create mode 100644 sdk/storage/azfile/internal/generated/zz_models_serde.go create mode 100644 sdk/storage/azfile/internal/generated/zz_response_types.go create mode 100644 sdk/storage/azfile/internal/generated/zz_service_client.go create mode 100644 sdk/storage/azfile/internal/generated/zz_share_client.go create mode 100644 sdk/storage/azfile/internal/generated/zz_time_rfc1123.go create mode 100644 sdk/storage/azfile/internal/generated/zz_time_rfc3339.go create mode 100644 sdk/storage/azfile/internal/generated/zz_xml_helper.go create mode 100644 sdk/storage/azfile/internal/shared/batch_transfer.go create mode 100644 sdk/storage/azfile/internal/shared/bytes_writer.go create mode 100644 sdk/storage/azfile/internal/shared/bytes_writer_test.go create mode 100644 sdk/storage/azfile/internal/shared/section_writer.go create mode 100644 sdk/storage/azfile/internal/shared/section_writer_test.go create mode 100644 sdk/storage/azfile/internal/shared/shared.go create mode 100644 sdk/storage/azfile/internal/shared/shared_test.go create mode 100644 sdk/storage/azfile/internal/testcommon/clients_auth.go create mode 100644 sdk/storage/azfile/internal/testcommon/common.go create mode 100644 sdk/storage/azfile/lease/client_test.go create mode 100644 sdk/storage/azfile/lease/constants.go create mode 100644 sdk/storage/azfile/lease/examples_test.go create mode 100644 sdk/storage/azfile/lease/file_client.go create mode 100644 sdk/storage/azfile/lease/models.go create mode 100644 sdk/storage/azfile/lease/responses.go create mode 100644 sdk/storage/azfile/lease/share_client.go create mode 100644 sdk/storage/azfile/log.go create mode 100644 sdk/storage/azfile/sas/account.go create mode 100644 sdk/storage/azfile/sas/account_test.go create mode 100644 sdk/storage/azfile/sas/query_params.go create mode 100644 sdk/storage/azfile/sas/query_params_test.go create mode 100644 sdk/storage/azfile/sas/service.go create mode 100644 
sdk/storage/azfile/sas/service_test.go create mode 100644 sdk/storage/azfile/sas/url_parts.go create mode 100644 sdk/storage/azfile/sas/url_parts_test.go create mode 100644 sdk/storage/azfile/service/client.go create mode 100644 sdk/storage/azfile/service/client_test.go create mode 100644 sdk/storage/azfile/service/constants.go create mode 100644 sdk/storage/azfile/service/examples_test.go create mode 100644 sdk/storage/azfile/service/models.go create mode 100644 sdk/storage/azfile/service/responses.go create mode 100644 sdk/storage/azfile/share/client.go create mode 100644 sdk/storage/azfile/share/client_test.go create mode 100644 sdk/storage/azfile/share/constants.go create mode 100644 sdk/storage/azfile/share/examples_test.go create mode 100644 sdk/storage/azfile/share/models.go create mode 100644 sdk/storage/azfile/share/responses.go create mode 100644 sdk/storage/azfile/test-resources.json diff --git a/eng/config.json b/eng/config.json index cc51b095a269..09645b7b7bd1 100644 --- a/eng/config.json +++ b/eng/config.json @@ -28,6 +28,10 @@ "Name": "azqueue", "CoverageGoal": 0.60 }, + { + "Name": "azfile", + "CoverageGoal": 0.75 + }, { "Name": "aztemplate", "CoverageGoal": 0.50 diff --git a/sdk/storage/azfile/CHANGELOG.md b/sdk/storage/azfile/CHANGELOG.md new file mode 100644 index 000000000000..04f97b45434f --- /dev/null +++ b/sdk/storage/azfile/CHANGELOG.md @@ -0,0 +1,7 @@ +# Release History + +## 0.1.0 (Unreleased) + +### Features Added + +* This is the initial preview release of the `azfile` library diff --git a/sdk/storage/azfile/LICENSE.txt b/sdk/storage/azfile/LICENSE.txt new file mode 100644 index 000000000000..d1ca00f20a89 --- /dev/null +++ b/sdk/storage/azfile/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/sdk/storage/azfile/README.md b/sdk/storage/azfile/README.md new file mode 100644 index 000000000000..013c2d022248 --- /dev/null +++ b/sdk/storage/azfile/README.md @@ -0,0 +1,266 @@ +# Azure File Storage SDK for Go + +> Service Version: 2020-10-02 + +Azure File Shares offers fully managed file shares in the cloud that are accessible via the industry standard +[Server Message Block (SMB) protocol](https://docs.microsoft.com/windows/desktop/FileIO/microsoft-smb-protocol-and-cifs-protocol-overview). +Azure file shares can be mounted concurrently by cloud or on-premises deployments of Windows, Linux, and macOS. +Additionally, Azure file shares can be cached on Windows Servers with Azure File Sync for fast access near where the data is being used. 
+ +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] + +## Getting started + +### Install the package + +Install the Azure File Storage SDK for Go with [go get][goget]: + +```Powershell +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azfile +``` + +### Prerequisites + +A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). + +You need an [Azure subscription][azure_sub] and a +[Storage Account][storage_account_docs] to use this package. + +To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. +Here's an example using the Azure CLI: + +```Powershell +az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS +``` + +### Authenticate the client + +The Azure File Storage SDK for Go allows you to interact with four types of resources: the storage +account itself, file shares, directories, and files. Interaction with these resources starts with an instance of a +client. To create a client object, you will need the storage account's file service URL and a +credential that allows you to access the storage account: + +```go +// create a credential for authenticating using shared key +cred, err := service.NewSharedKeyCredential("", "") +// TODO: handle err + +// create service.Client for the specified storage account that uses the above credential +client, err := service.NewClientWithSharedKeyCredential("https://.file.core.windows.net/", cred, nil) +// TODO: handle err +``` + +## Key concepts + +Azure file shares can be used to: + +- Completely replace or supplement traditional on-premises file servers or NAS devices. +- "Lift and shift" applications to the cloud that expect a file share to store file application or user data. 
+- Simplify new cloud development projects with shared application settings, diagnostic shares, and Dev/Test/Debug tool file shares. + +### Goroutine safety +We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines. + +### Additional concepts + +[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) | +[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) | +[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) | +[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log) + + +## Examples + +### Create a share and upload a file + +```go +const ( +shareName = "sample-share" +dirName = "sample-dir" +fileName = "sample-file" +) + +// Get a connection string to our Azure Storage account. You can +// obtain your connection string from the Azure Portal (click +// Access Keys under Settings in the Portal Storage account blade) +// or using the Azure CLI with: +// +// az storage account show-connection-string --name --resource-group +// +// And you can provide the connection string to your application +// using an environment variable. 
+connectionString := "" + +// Path to the local file to upload +localFilePath := "" + +// Get reference to a share and create it +shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) +// TODO: handle error +_, err = shareClient.Create(context.TODO(), nil) +// TODO: handle error + +// Get reference to a directory and create it +dirClient := shareClient.NewDirectoryClient(dirName) +_, err = dirClient.Create(context.TODO(), nil) +// TODO: handle error + +// open the file for reading +file, err := os.OpenFile(localFilePath, os.O_RDONLY, 0) +// TODO: handle error +defer file.Close() + +// get the size of file +fInfo, err := file.Stat() +// TODO: handle error +fSize := fInfo.Size() + +// create the file +fClient := dirClient.NewFileClient(fileName) +_, err = fClient.Create(context.TODO(), fSize, nil) +// TODO: handle error + +// upload the file +err = fClient.UploadFile(context.TODO(), file, nil) +// TODO: handle error +``` + +### Download a file + +```go +const ( +shareName = "sample-share" +dirName = "sample-dir" +fileName = "sample-file" +) + +connectionString := "" + +// Path to the save the downloaded file +localFilePath := "" + +// Get reference to the share +shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) +// TODO: handle error + +// Get reference to the directory +dirClient := shareClient.NewDirectoryClient(dirName) + +// Get reference to the file +fClient := dirClient.NewFileClient(fileName) + +// create or open a local file where we can download the Azure File +file, err := os.Create(localFilePath) +// TODO: handle error +defer file.Close() + +// Download the file +_, err = fClient.DownloadFile(context.TODO(), file, nil) +// TODO: handle error +``` + +### Traverse a share + +```go +const shareName = "sample-share" + +connectionString := "" + +// Get reference to the share +shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) +// TODO: handle error 
+ +// Track the remaining directories to walk, starting from the root +var dirs []*directory.Client +dirs = append(dirs, shareClient.NewRootDirectoryClient()) +for len(dirs) > 0 { + dirClient := dirs[0] + dirs = dirs[1:] + + // Get all the next directory's files and subdirectories + pager := dirClient.NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.TODO()) + // TODO: handle error + + for _, d := range resp.Segment.Directories { + fmt.Println(*d.Name) + // Keep walking down directories + dirs = append(dirs, dirClient.NewSubdirectoryClient(*d.Name)) + } + + for _, f := range resp.Segment.Files { + fmt.Println(*f.Name) + } + } +} +``` + +## Troubleshooting + +All File service operations will return an +[*azcore.ResponseError][azcore_response_error] on failure with a +populated `ErrorCode` field. Many of these errors are recoverable. +The [fileerror][file_error] package provides the possible Storage error codes +along with various helper facilities for error handling. + +```go +const ( + connectionString = "" + shareName = "sample-share" +) + +// create a client with the provided connection string +client, err := service.NewClientFromConnectionString(connectionString, nil) +// TODO: handle error + +// try to delete the share, avoiding any potential race conditions with an in-progress or completed deletion +_, err = client.DeleteShare(context.TODO(), shareName, nil) + +if fileerror.HasCode(err, fileerror.ShareBeingDeleted, fileerror.ShareNotFound) { + // ignore any errors if the share is being deleted or already has been deleted +} else if err != nil { + // TODO: some other error +} +``` + +## Next steps + +Get started with our [File samples][samples]. They contain complete examples of the above snippets and more. + +## Contributing + +See the [Storage CONTRIBUTING.md][storage_contrib] for details on building, +testing, and contributing to this library. + +This project welcomes contributions and suggestions. 
Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. For +details, visit [cla.microsoft.com][cla]. + +This project has adopted the [Microsoft Open Source Code of Conduct][coc]. +For more information see the [Code of Conduct FAQ][coc_faq] +or contact [opencode@microsoft.com][coc_contact] with any +additional questions or comments. + + +[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage +[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azfile +[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/file-service-rest-api +[product_docs]: https://docs.microsoft.com/azure/storage/files/storage-files-introduction +[godevdl]: https://go.dev/dl/ +[goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them +[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview +[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell +[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[azure_sub]: https://azure.microsoft.com/free/ +[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError +[file_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage +[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage +[storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md +[cla]: https://cla.microsoft.com +[coc]: https://opensource.microsoft.com/codeofconduct/ +[coc_faq]: 
https://opensource.microsoft.com/codeofconduct/faq/ +[coc_contact]: mailto:opencode@microsoft.com diff --git a/sdk/storage/azfile/assets.json b/sdk/storage/azfile/assets.json new file mode 100644 index 000000000000..47d08f9c3faa --- /dev/null +++ b/sdk/storage/azfile/assets.json @@ -0,0 +1,6 @@ +{ + "AssetsRepo": "Azure/azure-sdk-assets", + "AssetsRepoPrefixPath": "go", + "TagPrefix": "go/storage/azfile", + "Tag": "go/storage/azfile_f1e8c5b99b" +} diff --git a/sdk/storage/azfile/ci.yml b/sdk/storage/azfile/ci.yml new file mode 100644 index 000000000000..4978a37820df --- /dev/null +++ b/sdk/storage/azfile/ci.yml @@ -0,0 +1,33 @@ +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azfile + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azfile + + +stages: + - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'storage/azfile' + RunLiveTests: true + EnvVars: + AZURE_CLIENT_ID: $(AZFILE_CLIENT_ID) + AZURE_TENANT_ID: $(AZFILE_TENANT_ID) + AZURE_CLIENT_SECRET: $(AZFILE_CLIENT_SECRET) + AZURE_SUBSCRIPTION_ID: $(AZFILE_SUBSCRIPTION_ID) diff --git a/sdk/storage/azfile/directory/client.go b/sdk/storage/azfile/directory/client.go new file mode 100644 index 000000000000..6ea9713d0f20 --- /dev/null +++ b/sdk/storage/azfile/directory/client.go @@ -0,0 +1,205 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package directory + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "net/http" + "net/url" + "strings" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure Storage directory allowing you to manipulate its directories and files. +type Client base.Client[generated.DirectoryClient] + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a directory or with a shared access signature (SAS) token. +// - directoryURL - the URL of the directory e.g. https://.file.core.windows.net/share/directory? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(directoryURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewDirectoryClient(directoryURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - directoryURL - the URL of the directory e.g. 
https://.file.core.windows.net/share/directory +// - cred - a SharedKeyCredential created with the matching directory's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(directoryURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewDirectoryClient(directoryURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - shareName - the name of the share within the storage account +// - directoryPath - the path of the directory within the share +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, shareName string, directoryPath string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + + directoryPath = strings.ReplaceAll(directoryPath, "\\", "/") + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, shareName, directoryPath) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (d *Client) generated() *generated.DirectoryClient { + return base.InnerClient((*base.Client[generated.DirectoryClient])(d)) +} + +func 
(d *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.DirectoryClient])(d)) +} + +// URL returns the URL endpoint used by the Client object. +func (d *Client) URL() string { + return d.generated().Endpoint() +} + +// NewSubdirectoryClient creates a new Client object by concatenating subDirectoryName to the end of this Client's URL. +// The new subdirectory Client uses the same request policy pipeline as the parent directory Client. +func (d *Client) NewSubdirectoryClient(subDirectoryName string) *Client { + subDirectoryName = url.PathEscape(subDirectoryName) + subDirectoryURL := runtime.JoinPaths(d.URL(), subDirectoryName) + return (*Client)(base.NewDirectoryClient(subDirectoryURL, d.generated().Pipeline(), d.sharedKey())) +} + +// NewFileClient creates a new file.Client object by concatenating fileName to the end of this Client's URL. +// The new file.Client uses the same request policy pipeline as the Client. +func (d *Client) NewFileClient(fileName string) *file.Client { + fileName = url.PathEscape(fileName) + fileURL := runtime.JoinPaths(d.URL(), fileName) + return (*file.Client)(base.NewFileClient(fileURL, d.generated().Pipeline(), d.sharedKey())) +} + +// Create operation creates a new directory under the specified share or parent directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-directory. +func (d *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + fileAttributes, fileCreationTime, fileLastWriteTime, opts := options.format() + resp, err := d.generated().Create(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, opts) + return resp, err +} + +// Delete operation removes the specified empty directory. Note that the directory must be empty before it can be deleted. +// Deleting directories that aren't empty returns error 409 (Directory Not Empty). 
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/delete-directory. +func (d *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + opts := options.format() + resp, err := d.generated().Delete(ctx, opts) + return resp, err +} + +// GetProperties operation returns all system properties for the specified directory, and it can also be used to check the existence of a directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-directory-properties. +func (d *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts := options.format() + resp, err := d.generated().GetProperties(ctx, opts) + return resp, err +} + +// SetProperties operation sets system properties for the specified directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-directory-properties. +func (d *Client) SetProperties(ctx context.Context, options *SetPropertiesOptions) (SetPropertiesResponse, error) { + fileAttributes, fileCreationTime, fileLastWriteTime, opts := options.format() + resp, err := d.generated().SetProperties(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, opts) + return resp, err +} + +// SetMetadata operation sets user-defined metadata for the specified directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-directory-metadata. +func (d *Client) SetMetadata(ctx context.Context, options *SetMetadataOptions) (SetMetadataResponse, error) { + opts := options.format() + resp, err := d.generated().SetMetadata(ctx, opts) + return resp, err +} + +// ForceCloseHandles operation closes a handle or handles opened on a directory. +// - handleID - Specifies the handle ID to be closed. Use an asterisk (*) as a wildcard string to specify all handles. 
+// +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/force-close-handles. +func (d *Client) ForceCloseHandles(ctx context.Context, handleID string, options *ForceCloseHandlesOptions) (ForceCloseHandlesResponse, error) { + opts := options.format() + resp, err := d.generated().ForceCloseHandles(ctx, handleID, opts) + return resp, err +} + +// ListHandles operation returns a list of open handles on a directory. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-handles. +func (d *Client) ListHandles(ctx context.Context, options *ListHandlesOptions) (ListHandlesResponse, error) { + opts := options.format() + resp, err := d.generated().ListHandles(ctx, opts) + return resp, err +} + +// NewListFilesAndDirectoriesPager operation returns a pager for the files and directories starting from the specified Marker. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-directories-and-files. 
+func (d *Client) NewListFilesAndDirectoriesPager(options *ListFilesAndDirectoriesOptions) *runtime.Pager[ListFilesAndDirectoriesResponse] { + listOptions := generated.DirectoryClientListFilesAndDirectoriesSegmentOptions{} + if options != nil { + listOptions.Include = options.Include.format() + listOptions.IncludeExtendedInfo = options.IncludeExtendedInfo + listOptions.Marker = options.Marker + listOptions.Maxresults = options.MaxResults + listOptions.Prefix = options.Prefix + listOptions.Sharesnapshot = options.ShareSnapshot + } + + return runtime.NewPager(runtime.PagingHandler[ListFilesAndDirectoriesResponse]{ + More: func(page ListFilesAndDirectoriesResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListFilesAndDirectoriesResponse) (ListFilesAndDirectoriesResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = d.generated().ListFilesAndDirectoriesSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = d.generated().ListFilesAndDirectoriesSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListFilesAndDirectoriesResponse{}, err + } + resp, err := d.generated().Pipeline().Do(req) + if err != nil { + return ListFilesAndDirectoriesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListFilesAndDirectoriesResponse{}, runtime.NewResponseError(resp) + } + return d.generated().ListFilesAndDirectoriesSegmentHandleResponse(resp) + }, + }) +} diff --git a/sdk/storage/azfile/directory/client_test.go b/sdk/storage/azfile/directory/client_test.go new file mode 100644 index 000000000000..4e91138e9aad --- /dev/null +++ b/sdk/storage/azfile/directory/client_test.go @@ -0,0 +1,1117 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. + +package directory_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running directory Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &DirectoryRecordedTestsSuite{}) + suite.Run(t, &DirectoryUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &DirectoryRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &DirectoryRecordedTestsSuite{}) + } +} + +func (d *DirectoryRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(d.T(), suite, test) +} + +func (d *DirectoryRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(d.T(), suite, test) +} + +func (d *DirectoryUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (d *DirectoryUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type DirectoryRecordedTestsSuite struct { + suite.Suite +} + +type DirectoryUnrecordedTestsSuite struct { + suite.Suite +} + +func (d *DirectoryRecordedTestsSuite) TestDirNewDirectoryClient() { + _require := require.New(d.T()) + testName := d.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + 
_require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := shareClient.NewDirectoryClient(dirName) + + subDirName := "inner" + dirName + subDirClient := dirClient.NewSubdirectoryClient(subDirName) + + correctURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName + "/" + subDirName + _require.Equal(subDirClient.URL(), correctURL) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateFileURL() { + _require := require.New(d.T()) + testName := d.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := shareClient.NewDirectoryClient(dirName) + + fileName := testcommon.GenerateFileName(testName) + fileClient := dirClient.NewFileClient(fileName) + + correctURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName + "/" + fileName + _require.Equal(fileClient.URL(), correctURL) +} + +func (d *DirectoryRecordedTestsSuite) TestDirectoryCreateUsingSharedKey() { + _require := require.New(d.T()) + testName := d.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), 
_require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirURL := "https://" + cred.AccountName() + ".file.core.windows.net/" + shareName + "/" + dirName + + options := &directory.ClientOptions{} + testcommon.SetClientOptions(d.T(), &options.ClientOptions) + dirClient, err := directory.NewClientWithSharedKeyCredential(dirURL, cred, options) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + _require.Equal(resp.FileLastWriteTime.IsZero(), false) + _require.Equal(resp.FileChangeTime.IsZero(), false) +} + +func (d *DirectoryRecordedTestsSuite) TestDirectoryCreateUsingConnectionString() { + _require := require.New(d.T()) + testName := d.T().Name() + + connString, err := testcommon.GetGenericConnectionString(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + options := &directory.ClientOptions{} + testcommon.SetClientOptions(d.T(), &options.ClientOptions) + dirClient, err := directory.NewClientFromConnectionString(*connString, shareName, dirName, options) + _require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + 
_require.Equal(resp.FileLastWriteTime.IsZero(), false) + _require.Equal(resp.FileChangeTime.IsZero(), false) + + innerDirName1 := "innerdir1" + dirPath := dirName + "/" + innerDirName1 + dirClient1, err := directory.NewClientFromConnectionString(*connString, shareName, dirPath, options) + _require.NoError(err) + + resp, err = dirClient1.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + + innerDirName2 := "innerdir2" + // using '\' as path separator between directories + dirPath = dirName + "\\" + innerDirName1 + "\\" + innerDirName2 + dirClient2, err := directory.NewClientFromConnectionString(*connString, shareName, dirPath, options) + _require.NoError(err) + + resp, err = dirClient2.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) +} + +func (d *DirectoryRecordedTestsSuite) TestDirectoryCreateNegativeMultiLevel() { + _require := require.New(d.T()) + testName := d.T().Name() + + connString, err := testcommon.GetGenericConnectionString(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + // dirPath where parent dir does not exist + dirPath := "a/b/c/d/" + dirName + options := &directory.ClientOptions{} + testcommon.SetClientOptions(d.T(), &options.ClientOptions) + dirClient, err := directory.NewClientFromConnectionString(*connString, shareName, dirPath, options) + 
_require.NoError(err) + + resp, err := dirClient.Create(context.Background(), nil) + _require.Error(err) + _require.Nil(resp.RequestID) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ParentNotFound) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateDeleteDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := shareClient.NewDirectoryClient(dirName) + _require.NoError(err) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.Date.IsZero(), false) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.Equal(cResp.FileCreationTime.IsZero(), false) + _require.Equal(cResp.FileLastWriteTime.IsZero(), false) + _require.Equal(cResp.FileChangeTime.IsZero(), false) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.Date.IsZero(), false) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.Equal(gResp.FileCreationTime.IsZero(), false) + _require.Equal(gResp.FileLastWriteTime.IsZero(), false) + _require.Equal(gResp.FileChangeTime.IsZero(), false) +} + +func (d *DirectoryRecordedTestsSuite) TestDirSetPropertiesDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.FilePermissionKey) + + // Set the custom permissions + sResp, err := dirClient.SetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(sResp.FileCreationTime) + _require.NotNil(sResp.FileLastWriteTime) + _require.NotNil(sResp.FilePermissionKey) + _require.Equal(*sResp.FilePermissionKey, *cResp.FilePermissionKey) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(gResp.FileCreationTime) + _require.NotNil(gResp.FileLastWriteTime) + _require.NotNil(gResp.FilePermissionKey) + _require.Equal(*gResp.FilePermissionKey, *sResp.FilePermissionKey) + _require.Equal(*gResp.FileCreationTime, *sResp.FileCreationTime) + _require.Equal(*gResp.FileLastWriteTime, *sResp.FileLastWriteTime) + _require.Equal(*gResp.FileAttributes, *sResp.FileAttributes) +} + +func (d *DirectoryRecordedTestsSuite) TestDirSetPropertiesNonDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.FilePermissionKey) + + currTime, err := 
time.Parse(time.UnixDate, "Fri Mar 31 21:00:00 GMT 2023") + _require.NoError(err) + creationTime := currTime.Add(5 * time.Minute).Round(time.Microsecond) + lastWriteTime := currTime.Add(10 * time.Minute).Round(time.Millisecond) + + // Set the custom permissions + sResp, err := dirClient.SetProperties(context.Background(), &directory.SetPropertiesOptions{ + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ + ReadOnly: true, + System: true, + }, + CreationTime: &creationTime, + LastWriteTime: &lastWriteTime, + }, + FilePermissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + }, + }) + _require.NoError(err) + _require.NotNil(sResp.FileCreationTime) + _require.NotNil(sResp.FileLastWriteTime) + _require.NotNil(sResp.FilePermissionKey) + _require.NotEqual(*sResp.FilePermissionKey, *cResp.FilePermissionKey) + _require.Equal(*sResp.FileCreationTime, creationTime.UTC()) + _require.Equal(*sResp.FileLastWriteTime, lastWriteTime.UTC()) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(gResp.FileCreationTime) + _require.NotNil(gResp.FileLastWriteTime) + _require.NotNil(gResp.FilePermissionKey) + _require.Equal(*gResp.FilePermissionKey, *sResp.FilePermissionKey) + _require.Equal(*gResp.FileCreationTime, *sResp.FileCreationTime) + _require.Equal(*gResp.FileLastWriteTime, *sResp.FileLastWriteTime) + _require.Equal(*gResp.FileAttributes, *sResp.FileAttributes) +} + +func (d *DirectoryUnrecordedTestsSuite) TestDirCreateDeleteNonDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := 
testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + + cResp, err := dirClient.Create(context.Background(), &directory.CreateOptions{ + Metadata: md, + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{None: true}, + CreationTime: to.Ptr(time.Now().Add(5 * time.Minute)), + LastWriteTime: to.Ptr(time.Now().Add(10 * time.Minute)), + }, + FilePermissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FilePermissionKey) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.FilePermissionKey, *cResp.FilePermissionKey) + _require.EqualValues(gResp.Metadata, md) + + // Creating again will result in 409 and ResourceAlreadyExists. 
+ _, err = dirClient.Create(context.Background(), &directory.CreateOptions{Metadata: md}) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceAlreadyExists) + + dResp, err := dirClient.Delete(context.Background(), nil) + _require.NoError(err) + _require.Equal(dResp.Date.IsZero(), false) + _require.NotNil(dResp.RequestID) + _require.NotNil(dResp.Version) + + _, err = dirClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateNegativePermissions() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + subDirClient := dirClient.NewSubdirectoryClient("subdir" + dirName) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.FilePermissionKey) + + // having both Permission and PermissionKey set returns error + _, err = subDirClient.Create(context.Background(), &directory.CreateOptions{ + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{None: true}, + }, + FilePermissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + PermissionKey: cResp.FilePermissionKey, + }, + }) + _require.Error(err) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateNegativeAttributes() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + 
_require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirClient := testcommon.GetDirectoryClient(testcommon.GenerateDirectoryName(testName), shareClient) + + // None attribute must be used alone. + _, err = dirClient.Create(context.Background(), &directory.CreateOptions{ + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{None: true, ReadOnly: true}, + }, + }) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidHeaderValue) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateDeleteNegativeMultiLevelDir() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + parentDirName := "parent" + testcommon.GenerateDirectoryName(testName) + parentDirClient := shareClient.NewDirectoryClient(parentDirName) + + subDirName := "subdir" + testcommon.GenerateDirectoryName(testName) + subDirClient := parentDirClient.NewSubdirectoryClient(subDirName) + + // Directory create with subDirClient + _, err = subDirClient.Create(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ParentNotFound) + + _, err = parentDirClient.Create(context.Background(), nil) + _require.NoError(err) + + _, err = subDirClient.Create(context.Background(), nil) + _require.NoError(err) + + _, err = subDirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + + // Delete level by level + // Delete Non-empty directory should 
fail + _, err = parentDirClient.Delete(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.DirectoryNotEmpty) + + _, err = subDirClient.Delete(context.Background(), nil) + _require.NoError(err) + + _, err = parentDirClient.Delete(context.Background(), nil) + _require.NoError(err) +} + +func (d *DirectoryRecordedTestsSuite) TestDirCreateEndWithSlash() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + "/" + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + cResp, err := dirClient.Create(context.Background(), nil) + _require.NoError(err) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.Version) + + _, err = dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) +} + +func (d *DirectoryRecordedTestsSuite) TestDirGetSetMetadataDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + defer 
testcommon.DeleteDirectory(context.Background(), _require, dirClient) + + sResp, err := dirClient.SetMetadata(context.Background(), nil) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + _require.NotNil(sResp.IsServerEncrypted) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.NotNil(gResp.IsServerEncrypted) + _require.Len(gResp.Metadata, 0) +} + +func (d *DirectoryRecordedTestsSuite) TestDirGetSetMetadataNonDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + defer testcommon.DeleteDirectory(context.Background(), _require, dirClient) + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + + sResp, err := dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + _require.NotNil(sResp.IsServerEncrypted) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + 
_require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.NotNil(gResp.IsServerEncrypted) + _require.EqualValues(gResp.Metadata, md) +} + +func (d *DirectoryRecordedTestsSuite) TestDirSetMetadataNegative() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + defer testcommon.DeleteDirectory(context.Background(), _require, dirClient) + + md := map[string]*string{ + "!@#$%^&*()": to.Ptr("!@#$%^&*()"), + } + + _, err = dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md, + }) + _require.Error(err) +} + +func (d *DirectoryRecordedTestsSuite) TestDirGetPropertiesNegative() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.GetDirectoryClient(dirName, shareClient) + + _, err = dirClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) +} + +func (d *DirectoryRecordedTestsSuite) 
TestDirGetPropertiesWithBaseDirectory() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirClient := shareClient.NewRootDirectoryClient() + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Equal(gResp.Date.IsZero(), false) + _require.Equal(gResp.FileCreationTime.IsZero(), false) + _require.Equal(gResp.FileLastWriteTime.IsZero(), false) + _require.Equal(gResp.FileChangeTime.IsZero(), false) + _require.NotNil(gResp.IsServerEncrypted) +} + +func (d *DirectoryRecordedTestsSuite) TestDirGetSetMetadataMergeAndReplace() { + _require := require.New(d.T()) + testName := d.T().Name() + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + defer testcommon.DeleteDirectory(context.Background(), _require, dirClient) + + md := map[string]*string{ + "Color": to.Ptr("RED"), + } + + sResp, err := dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + _require.NotNil(sResp.RequestID) + 
_require.NotNil(sResp.IsServerEncrypted) + + gResp, err := dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.NotNil(gResp.IsServerEncrypted) + _require.EqualValues(gResp.Metadata, md) + + md2 := map[string]*string{ + "Color": to.Ptr("WHITE"), + } + + sResp, err = dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md2, + }) + _require.NoError(err) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.IsServerEncrypted) + + gResp, err = dirClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.NotNil(gResp.IsServerEncrypted) + _require.EqualValues(gResp.Metadata, md2) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 5; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + dirCtr, fileCtr := 0, 0 + pager := 
shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + for _, dir := range resp.Segment.Directories { + _require.NotNil(dir.Name) + _require.NotNil(dir.ID) + _require.Nil(dir.Attributes) + _require.Nil(dir.PermissionKey) + _require.Nil(dir.Properties.ETag) + _require.Nil(dir.Properties.ChangeTime) + _require.Nil(dir.Properties.CreationTime) + _require.Nil(dir.Properties.ContentLength) + } + for _, f := range resp.Segment.Files { + _require.NotNil(f.Name) + _require.NotNil(f.ID) + _require.Nil(f.Attributes) + _require.Nil(f.PermissionKey) + _require.Nil(f.Properties.ETag) + _require.Nil(f.Properties.ChangeTime) + _require.Nil(f.Properties.CreationTime) + _require.NotNil(f.Properties.ContentLength) + _require.Equal(*f.Properties.ContentLength, int64(2048)) + } + } + _require.Equal(dirCtr, 10) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsInclude() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 5; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + dirCtr, fileCtr := 0, 0 + pager := 
shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + Include: directory.ListFilesInclude{Timestamps: true, ETag: true, Attributes: true, PermissionKey: true}, + IncludeExtendedInfo: to.Ptr(true), + }) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + for _, dir := range resp.Segment.Directories { + _require.NotNil(dir.Name) + _require.NotNil(dir.ID) + _require.NotNil(dir.Attributes) + _require.NotNil(dir.PermissionKey) + _require.NotNil(dir.Properties.ETag) + _require.NotNil(dir.Properties.ChangeTime) + _require.NotNil(dir.Properties.CreationTime) + _require.Nil(dir.Properties.ContentLength) + } + for _, f := range resp.Segment.Files { + _require.NotNil(f.Name) + _require.NotNil(f.ID) + _require.NotNil(f.Attributes) + _require.NotNil(f.PermissionKey) + _require.NotNil(f.Properties.ETag) + _require.NotNil(f.Properties.ChangeTime) + _require.NotNil(f.Properties.CreationTime) + _require.NotNil(f.Properties.ContentLength) + _require.Equal(*f.Properties.ContentLength, int64(2048)) + } + } + _require.Equal(dirCtr, 10) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsMaxResultsAndMarker() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 5; i++ 
{ + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + dirCtr, fileCtr := 0, 0 + pager := shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + MaxResults: to.Ptr(int32(2)), + }) + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + _require.Equal(dirCtr+fileCtr, 2) + + pager = shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + Marker: resp.NextMarker, + MaxResults: to.Ptr(int32(5)), + }) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + } + _require.Equal(dirCtr, 10) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsWithPrefix() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, fmt.Sprintf("%v", i)+dirName, shareClient) + } + + for i := 0; i < 5; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fmt.Sprintf("%v", i)+fileName, 2048, shareClient) + } + + dirCtr, fileCtr := 0, 0 + pager := shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + Prefix: to.Ptr("1"), + }) + for 
pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + if len(resp.Segment.Directories) > 0 { + _require.NotNil(resp.Segment.Directories[0].Name) + _require.Equal(*resp.Segment.Directories[0].Name, "1"+dirName) + } + if len(resp.Segment.Files) > 0 { + _require.NotNil(resp.Segment.Files[0].Name) + _require.Equal(*resp.Segment.Files[0].Name, "1"+fileName) + } + } + _require.Equal(dirCtr, 1) + _require.Equal(fileCtr, 1) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsMaxResultsNegative() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 2; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 2; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + pager := shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + MaxResults: to.Ptr(int32(-1)), + }) + _, err = pager.NextPage(context.Background()) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.OutOfRangeQueryParameterValue) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsSnapshot() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) 
+ + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer func() { + _, err := shareClient.Delete(context.Background(), &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + _require.NoError(err) + }() + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + + for i := 0; i < 10; i++ { + _ = testcommon.CreateNewDirectory(context.Background(), _require, dirName+fmt.Sprintf("%v", i), shareClient) + } + + for i := 0; i < 5; i++ { + _ = testcommon.CreateNewFileFromShare(context.Background(), _require, fileName+fmt.Sprintf("%v", i), 2048, shareClient) + } + + snapResp, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + _require.NotNil(snapResp.Snapshot) + + _, err = shareClient.NewRootDirectoryClient().GetProperties(context.Background(), &directory.GetPropertiesOptions{ShareSnapshot: snapResp.Snapshot}) + _require.NoError(err) + + dirCtr, fileCtr := 0, 0 + pager := shareClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + ShareSnapshot: snapResp.Snapshot, + }) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + } + _require.Equal(dirCtr, 10) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListFilesAndDirsInsideDir() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName 
:= testcommon.GenerateFileName(testName) + + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + + for i := 0; i < 5; i++ { + _, err = dirClient.NewSubdirectoryClient("subdir"+fmt.Sprintf("%v", i)).Create(context.Background(), nil) + _require.NoError(err) + } + + for i := 0; i < 5; i++ { + _, err = dirClient.NewFileClient(fileName+fmt.Sprintf("%v", i)).Create(context.Background(), 0, nil) + _require.NoError(err) + } + + dirCtr, fileCtr := 0, 0 + pager := dirClient.NewListFilesAndDirectoriesPager(&directory.ListFilesAndDirectoriesOptions{ + Include: directory.ListFilesInclude{Timestamps: true, ETag: true, Attributes: true, PermissionKey: true}, + }) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + for _, dir := range resp.Segment.Directories { + _require.NotNil(dir.Name) + _require.NotNil(dir.ID) + _require.NotNil(dir.Attributes) + _require.NotNil(dir.PermissionKey) + _require.NotNil(dir.Properties.ETag) + _require.NotNil(dir.Properties.ChangeTime) + _require.NotNil(dir.Properties.CreationTime) + _require.Nil(dir.Properties.ContentLength) + } + for _, f := range resp.Segment.Files { + _require.NotNil(f.Name) + _require.NotNil(f.ID) + _require.NotNil(f.Attributes) + _require.NotNil(f.PermissionKey) + _require.NotNil(f.Properties.ETag) + _require.NotNil(f.Properties.ChangeTime) + _require.NotNil(f.Properties.CreationTime) + _require.NotNil(f.Properties.ContentLength) + _require.Equal(*f.Properties.ContentLength, int64(0)) + } + } + _require.Equal(dirCtr, 5) + _require.Equal(fileCtr, 5) +} + +func (d *DirectoryRecordedTestsSuite) TestDirListHandlesDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, testcommon.GenerateDirectoryName(testName), shareClient) + + resp, err := dirClient.ListHandles(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.Handles, 0) + _require.NotNil(resp.NextMarker) + _require.Equal(*resp.NextMarker, "") +} + +func (d *DirectoryRecordedTestsSuite) TestDirForceCloseHandlesDefault() { + _require := require.New(d.T()) + testName := d.T().Name() + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, testcommon.GenerateDirectoryName(testName), shareClient) + + resp, err := dirClient.ForceCloseHandles(context.Background(), "*", nil) + _require.NoError(err) + _require.EqualValues(*resp.NumberOfHandlesClosed, 0) + _require.EqualValues(*resp.NumberOfHandlesFailedToClose, 0) + _require.Nil(resp.Marker) +} + +func (d *DirectoryRecordedTestsSuite) TestDirectoryCreateNegativeWithoutSAS() { + _require := require.New(d.T()) + testName := d.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(d.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := 
testcommon.GenerateDirectoryName(testName) + dirURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName + + options := &directory.ClientOptions{} + testcommon.SetClientOptions(d.T(), &options.ClientOptions) + dirClient, err := directory.NewClientWithNoCredential(dirURL, nil) + _require.NoError(err) + + _, err = dirClient.Create(context.Background(), nil) + _require.Error(err) +} diff --git a/sdk/storage/azfile/directory/constants.go b/sdk/storage/azfile/directory/constants.go new file mode 100644 index 000000000000..2b16931bbc56 --- /dev/null +++ b/sdk/storage/azfile/directory/constants.go @@ -0,0 +1,24 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// ListFilesIncludeType defines values for ListFilesIncludeType +type ListFilesIncludeType = generated.ListFilesIncludeType + +const ( + ListFilesIncludeTypeTimestamps ListFilesIncludeType = generated.ListFilesIncludeTypeTimestamps + ListFilesIncludeTypeETag ListFilesIncludeType = generated.ListFilesIncludeTypeEtag + ListFilesIncludeTypeAttributes ListFilesIncludeType = generated.ListFilesIncludeTypeAttributes + ListFilesIncludeTypePermissionKey ListFilesIncludeType = generated.ListFilesIncludeTypePermissionKey +) + +// PossibleListFilesIncludeTypeValues returns the possible values for the ListFilesIncludeType const type. 
+func PossibleListFilesIncludeTypeValues() []ListFilesIncludeType { + return generated.PossibleListFilesIncludeTypeValues() +} diff --git a/sdk/storage/azfile/directory/examples_test.go b/sdk/storage/azfile/directory/examples_test.go new file mode 100644 index 000000000000..0d355ff82191 --- /dev/null +++ b/sdk/storage/azfile/directory/examples_test.go @@ -0,0 +1,193 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "log" + "os" + "time" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +func Example_client_NewClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + client, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareClient := client.NewShareClient("testShare") + + dirClient := shareClient.NewDirectoryClient("testDir") + fmt.Println(dirClient.URL()) + +} + +func Example_directory_NewClientFromConnectionString() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + dirName := "testDirectory" + dirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, dirName, nil) + handleError(err) + fmt.Println(dirClient.URL()) +} + +func Example_directoryClient_Create() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + dirName := "testDirectory" + dirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, dirName, nil) + handleError(err) + _, err = dirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Directory created") + + _, err = dirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Directory deleted") +} + +func Example_directoryClient_SetProperties() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + dirName := "testDirectory" + dirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, dirName, nil) + handleError(err) + _, err = dirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Directory created") + + creationTime := time.Now().Add(5 * time.Minute).Round(time.Microsecond) + lastWriteTime := time.Now().Add(10 * time.Minute).Round(time.Millisecond) + + // Set the custom permissions + _, err = dirClient.SetProperties(context.Background(), &directory.SetPropertiesOptions{ + FileSMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ + ReadOnly: true, + System: true, + }, + CreationTime: &creationTime, + LastWriteTime: &lastWriteTime, + }, + FilePermissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + }, + }) + handleError(err) + fmt.Println("Directory properties set") + + _, err = dirClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println("Directory properties retrieved") + + _, err = dirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Directory deleted") +} + +func Example_directoryClient_ListFilesAndDirectoriesSegment() { + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + parentDirName := "testParentDirectory" + childDirName := "testChildDirectory" + parentDirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, parentDirName, nil) + handleError(err) + _, err = parentDirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Parent directory created") + + childDirClient := 
parentDirClient.NewSubdirectoryClient(childDirName) + _, err = childDirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Child directory created") + + pager := parentDirClient.NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + handleError(err) // if err is not nil, break the loop. + for _, _dir := range resp.Segment.Directories { + fmt.Printf("%v", _dir) + } + } + + _, err = childDirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Child directory deleted") + + _, err = parentDirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Parent directory deleted") +} + +func Example_directoryClient_SetMetadata() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + dirName := "testDirectory" + dirClient, err := directory.NewClientFromConnectionString(connectionString, shareName, dirName, nil) + handleError(err) + _, err = dirClient.Create(context.Background(), nil) + handleError(err) + fmt.Println("Directory created") + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + + _, err = dirClient.SetMetadata(context.Background(), &directory.SetMetadataOptions{ + Metadata: md, + }) + handleError(err) + fmt.Println("Directory metadata set") + + _, err = dirClient.Delete(context.Background(), nil) + handleError(err) + fmt.Println("Directory deleted") +} diff --git a/sdk/storage/azfile/directory/models.go b/sdk/storage/azfile/directory/models.go new file mode 100644 index 000000000000..950cf1a91d63 --- /dev/null +++ b/sdk/storage/azfile/directory/models.go @@ -0,0 +1,255 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "reflect" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // The default value is 'Directory' for Attributes and 'now' for CreationTime and LastWriteTime fields in file.SMBProperties. + FileSMBProperties *file.SMBProperties + // The default value is 'inherit' for Permission field in file.Permissions. + FilePermissions *file.Permissions + // A name-value pair to associate with a file storage object. 
+ Metadata map[string]*string +} + +func (o *CreateOptions) format() (fileAttributes string, fileCreationTime string, fileLastWriteTime string, createOptions *generated.DirectoryClientCreateOptions) { + if o == nil { + return shared.FileAttributesDirectory, shared.DefaultCurrentTimeString, shared.DefaultCurrentTimeString, &generated.DirectoryClientCreateOptions{ + FilePermission: to.Ptr(shared.DefaultFilePermissionString), + } + } + + fileAttributes, fileCreationTime, fileLastWriteTime = o.FileSMBProperties.Format(true, shared.FileAttributesDirectory, shared.DefaultCurrentTimeString) + + permission, permissionKey := o.FilePermissions.Format(shared.DefaultFilePermissionString) + + createOptions = &generated.DirectoryClientCreateOptions{ + FilePermission: permission, + FilePermissionKey: permissionKey, + Metadata: o.Metadata, + } + + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + // placeholder for future options +} + +func (o *DeleteOptions) format() *generated.DirectoryClientDeleteOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // ShareSnapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query for the directory properties. 
+ ShareSnapshot *string +} + +func (o *GetPropertiesOptions) format() *generated.DirectoryClientGetPropertiesOptions { + if o == nil { + return nil + } + + return &generated.DirectoryClientGetPropertiesOptions{ + Sharesnapshot: o.ShareSnapshot, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions contains the optional parameters for the Client.SetProperties method. +type SetPropertiesOptions struct { + // The default value is 'preserve' for Attributes, CreationTime and LastWriteTime fields in file.SMBProperties. + FileSMBProperties *file.SMBProperties + // The default value is 'preserve' for Permission field in file.Permissions. + FilePermissions *file.Permissions +} + +func (o *SetPropertiesOptions) format() (fileAttributes string, fileCreationTime string, fileLastWriteTime string, setPropertiesOptions *generated.DirectoryClientSetPropertiesOptions) { + if o == nil { + return shared.DefaultPreserveString, shared.DefaultPreserveString, shared.DefaultPreserveString, &generated.DirectoryClientSetPropertiesOptions{ + FilePermission: to.Ptr(shared.DefaultPreserveString), + } + } + + fileAttributes, fileCreationTime, fileLastWriteTime = o.FileSMBProperties.Format(true, shared.DefaultPreserveString, shared.DefaultPreserveString) + + permission, permissionKey := o.FilePermissions.Format(shared.DefaultPreserveString) + + setPropertiesOptions = &generated.DirectoryClientSetPropertiesOptions{ + FilePermission: permission, + FilePermissionKey: permissionKey, + } + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. +type SetMetadataOptions struct { + // A name-value pair to associate with a file storage object. 
+ Metadata map[string]*string +} + +func (o *SetMetadataOptions) format() *generated.DirectoryClientSetMetadataOptions { + if o == nil { + return nil + } + + return &generated.DirectoryClientSetMetadataOptions{ + Metadata: o.Metadata, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListFilesAndDirectoriesOptions contains the optional parameters for the Client.NewListFilesAndDirectoriesPager method. +type ListFilesAndDirectoriesOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include ListFilesInclude + // Include extended information. + IncludeExtendedInfo *bool + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + MaxResults *int32 + // Filters the results to return only entries whose name begins with the specified prefix. + Prefix *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query for the list of files and directories. + ShareSnapshot *string +} + +// ListFilesInclude specifies one or more datasets to include in the response. 
+type ListFilesInclude struct { + Timestamps, ETag, Attributes, PermissionKey bool +} + +func (l ListFilesInclude) format() []generated.ListFilesIncludeType { + if reflect.ValueOf(l).IsZero() { + return nil + } + + var include []generated.ListFilesIncludeType + + if l.Timestamps { + include = append(include, ListFilesIncludeTypeTimestamps) + } + if l.ETag { + include = append(include, ListFilesIncludeTypeETag) + } + if l.Attributes { + include = append(include, ListFilesIncludeTypeAttributes) + } + if l.PermissionKey { + include = append(include, ListFilesIncludeTypePermissionKey) + } + + return include +} + +// FilesAndDirectoriesListSegment - Abstract for entries that can be listed from directory. +type FilesAndDirectoriesListSegment = generated.FilesAndDirectoriesListSegment + +// Directory - A listed directory item. +type Directory = generated.Directory + +// File - A listed file item. +type File = generated.File + +// FileProperty - File properties. +type FileProperty = generated.FileProperty + +// --------------------------------------------------------------------------------------------------------------------- + +// ListHandlesOptions contains the optional parameters for the Client.ListHandles method. +type ListHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + MaxResults *int32 + // Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files. 
+ Recursive *bool + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ListHandlesOptions) format() *generated.DirectoryClientListHandlesOptions { + if o == nil { + return nil + } + + return &generated.DirectoryClientListHandlesOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + Recursive: o.Recursive, + Sharesnapshot: o.ShareSnapshot, + } +} + +// Handle - A listed Azure Storage handle item. +type Handle = generated.Handle + +// --------------------------------------------------------------------------------------------------------------------- + +// ForceCloseHandlesOptions contains the optional parameters for the Client.ForceCloseHandles method. +type ForceCloseHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files. + Recursive *bool + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ ShareSnapshot *string +} + +func (o *ForceCloseHandlesOptions) format() *generated.DirectoryClientForceCloseHandlesOptions { + if o == nil { + return nil + } + + return &generated.DirectoryClientForceCloseHandlesOptions{ + Marker: o.Marker, + Recursive: o.Recursive, + Sharesnapshot: o.ShareSnapshot, + } +} diff --git a/sdk/storage/azfile/directory/responses.go b/sdk/storage/azfile/directory/responses.go new file mode 100644 index 000000000000..28f2470b10ba --- /dev/null +++ b/sdk/storage/azfile/directory/responses.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package directory + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.DirectoryClientCreateResponse + +// DeleteResponse contains the response from method Client.Delete. +type DeleteResponse = generated.DirectoryClientDeleteResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.DirectoryClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. +type SetPropertiesResponse = generated.DirectoryClientSetPropertiesResponse + +// SetMetadataResponse contains the response from method Client.SetMetadata. +type SetMetadataResponse = generated.DirectoryClientSetMetadataResponse + +// ListFilesAndDirectoriesResponse contains the response from method Client.NewListFilesAndDirectoriesPager. +type ListFilesAndDirectoriesResponse = generated.DirectoryClientListFilesAndDirectoriesSegmentResponse + +// ListFilesAndDirectoriesSegmentResponse - An enumeration of directories and files. 
+type ListFilesAndDirectoriesSegmentResponse = generated.ListFilesAndDirectoriesSegmentResponse + +// ListHandlesResponse contains the response from method Client.ListHandles. +type ListHandlesResponse = generated.DirectoryClientListHandlesResponse + +// ListHandlesSegmentResponse - An enumeration of handles. +type ListHandlesSegmentResponse = generated.ListHandlesResponse + +// ForceCloseHandlesResponse contains the response from method Client.ForceCloseHandles. +type ForceCloseHandlesResponse = generated.DirectoryClientForceCloseHandlesResponse diff --git a/sdk/storage/azfile/doc.go b/sdk/storage/azfile/doc.go new file mode 100644 index 000000000000..51d645839165 --- /dev/null +++ b/sdk/storage/azfile/doc.go @@ -0,0 +1,229 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +/* +Package azfile provides access to Azure File Storage. +For more information please see https://learn.microsoft.com/rest/api/storageservices/file-service-rest-api + +The azfile package is capable of :- + - Creating, deleting, and querying shares in an account + - Creating, deleting, and querying directories in a share + - Creating, deleting, and querying files in a share or directory + - Creating Shared Access Signature for authentication + +Types of Resources + +The azfile package allows you to interact with four types of resources :- + +* Azure storage accounts. +* Shares within those storage accounts. +* Directories within those shares. +* Files within those shares or directories. + +The Azure File Storage (azfile) client library for Go allows you to interact with each of these components through the use of a dedicated client object. +To create a client object, you will need the account's file service endpoint URL and a credential that allows you to access the account. 
+ +Types of Credentials + +The clients support different forms of authentication. +The azfile library supports authorization via a shared key, Connection String, +or with a Shared Access Signature token. + +Using a Shared Key + +To use an account shared key (aka account key or access key), provide the key as a string. +This can be found in your storage account in the Azure Portal under the "Access Keys" section. + +Use the key as the credential parameter to authenticate the client: + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + serviceClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handle(err) + + fmt.Println(serviceClient.URL()) + +Using a Connection String + +Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. +To do this, pass the connection string to the service client's `NewClientFromConnectionString` method. +The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. + + connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" + serviceClient, err := azfile.NewServiceClientFromConnectionString(connStr, nil) + handle(err) + +Using a Shared Access Signature (SAS) Token + +To use a shared access signature (SAS) token, provide the token at the end of your service URL. +You can generate a SAS token from the Azure Portal under Shared Access Signature or use the service.Client.GetSASURL() functions. 

	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}
	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}
	serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName)

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handle(err)
	serviceClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	handle(err)
	fmt.Println(serviceClient.URL())

	// Alternatively, you can create SAS on the fly

	resources := sas.AccountResourceTypes{Service: true}
	permission := sas.AccountPermissions{Read: true}
	start := time.Now()
	expiry := start.AddDate(0, 0, 1)
	serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, expiry, &service.GetSASURLOptions{StartTime: &start})
	handle(err)

	serviceClientWithSAS, err := service.NewClientWithNoCredential(serviceURLWithSAS, nil)
	handle(err)

	fmt.Println(serviceClientWithSAS.URL())

Types of Clients

There are four different clients provided to interact with the various components of the File Service:

1. **`ServiceClient`**
	* Get and set account settings.
	* Query, create, delete and restore shares within the account.

2. **`ShareClient`**
	* Get and set share access settings, properties, and metadata.
	* Create, delete, and query directories and files within the share.
	* `lease.ShareClient` to support share lease management.

3. **`DirectoryClient`**
	* Create or delete operations on a given directory.
	* Get and set directory properties.
	* List sub-directories and files within the given directory.

4. **`FileClient`**
	* Get and set file properties.
	* Perform CRUD operations on a given file.
	* `FileLeaseClient` to support file lease management.

Examples

	// Your account name and key can be obtained from the Azure Portal.
	accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
	}

	accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
	if !ok {
		panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
	}

	cred, err := service.NewSharedKeyCredential(accountName, accountKey)
	handle(err)

	// The service URL for file endpoints is usually in the form: http(s)://<account>.file.core.windows.net/
	serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/", accountName), cred, nil)
	handle(err)

	// ===== 1. Create a share =====

	// First, create a share client, and use the Create method to create a new share in your account
	shareClient := serviceClient.NewShareClient("testshare")

	// All APIs have an options' bag struct as a parameter.
	// The options' bag struct allows you to specify optional parameters such as metadata, quota, etc.
	// If you want to use the default options, pass in nil.
	_, err = shareClient.Create(context.TODO(), nil)
	handle(err)

	// ===== 2. Create a directory =====

	// First, create a directory client, and use the Create method to create a new directory in the share
	dirClient := shareClient.NewDirectoryClient("testdir")
	_, err = dirClient.Create(context.TODO(), nil)
	handle(err)

	// ===== 3. Upload and Download a file =====
	uploadData := "Hello world!"

	// First, create a file client, and use the Create method to create a new file in the directory
	fileClient := dirClient.NewFileClient("HelloWorld.txt")
	_, err = fileClient.Create(context.TODO(), int64(len(uploadData)), nil)
	handle(err)

	// Upload data to the file
	_, err = fileClient.UploadRange(context.TODO(), 0, streaming.NopCloser(strings.NewReader(uploadData)), nil)
	handle(err)

	// Download the file's contents and ensure that the download worked properly
	fileDownloadResponse, err := fileClient.DownloadStream(context.TODO(), nil)
	handle(err)

	// Use io.ReadAll to read the downloaded data.
	// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
	reader := fileDownloadResponse.Body
	downloadData, err := io.ReadAll(reader)
	handle(err)
	if string(downloadData) != uploadData {
		handle(errors.New("uploaded data should be same as downloaded data"))
	}

	if err = reader.Close(); err != nil {
		handle(err)
		return
	}

	// ===== 4. List directories and files in a share =====
	// List methods return a pager object which can be used to iterate over the results of a paging operation.
	// Call More() to determine whether there are more pages to fetch, then NextPage(context.Context) to
	// fetch the next page of results; NextPage returns either the page or an error.
	// The below code lists the contents only for a single level of the directory hierarchy.
	rootDirClient := shareClient.NewRootDirectoryClient()
	pager := rootDirClient.NewListFilesAndDirectoriesPager(nil)
	for pager.More() {
		resp, err := pager.NextPage(context.TODO())
		handle(err)
		for _, d := range resp.Segment.Directories {
			fmt.Println(*d.Name)
		}
		for _, f := range resp.Segment.Files {
			fmt.Println(*f.Name)
		}
	}

	// Delete the file.
	_, err = fileClient.Delete(context.TODO(), nil)
	handle(err)

	// Delete the directory.
	_, err = dirClient.Delete(context.TODO(), nil)
	handle(err)

	// Delete the share.
	_, err = shareClient.Delete(context.TODO(), nil)
	handle(err)
*/

package azfile
diff --git a/sdk/storage/azfile/file/chunkwriting.go b/sdk/storage/azfile/file/chunkwriting.go
new file mode 100644
index 000000000000..21070c19bcad
--- /dev/null
+++ b/sdk/storage/azfile/file/chunkwriting.go
@@ -0,0 +1,189 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.

package file

import (
	"bytes"
	"context"
	"errors"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"io"
	"sync"
)

// chunkWriter provides methods to upload chunks that represent a file to a server.
// This allows us to provide a local implementation that fakes the server for hermetic testing.
// *Client satisfies this interface via its UploadRange method.
type chunkWriter interface {
	UploadRange(context.Context, int64, io.ReadSeekCloser, *UploadRangeOptions) (UploadRangeResponse, error)
}

// bufferManager provides an abstraction for the management of buffers.
// This is mostly for testing purposes, but does allow for different implementations without changing the algorithm.
type bufferManager[T ~[]byte] interface {
	// Acquire returns the channel that contains the pool of buffers.
	Acquire() <-chan T

	// Release releases the buffer back to the pool for reuse/cleanup.
	Release(T)

	// Grow grows the number of buffers, up to the predefined max.
	// It returns the total number of buffers or an error.
	// No error is returned if the number of buffers has reached max.
	// This is called only from the reading goroutine.
	Grow() (int, error)

	// Free cleans up all buffers.
	Free()
}

// copyFromReader copies a source io.Reader to file storage using concurrent uploads.
func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst chunkWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) bufferManager[T]) error {
	options.setDefaults()

	wg := sync.WaitGroup{}       // Used to know when all outgoing chunks have finished processing
	errCh := make(chan error, 1) // contains the first error encountered during processing
	// err holds the most recent result of io.ReadFull; it is inspected at the bottom of the
	// loop to decide when to stop reading, and again after the loop to report read failures.
	var err error

	buffers := getBufferManager(options.Concurrency, options.ChunkSize)
	defer buffers.Free()

	// this controls the lifetime of the uploading goroutines.
	// if an error is encountered, cancel() is called which will terminate all uploads.
	// NOTE: the ordering is important here. cancel MUST execute before
	// cleaning up the buffers so that any uploading goroutines exit first,
	// releasing their buffers back to the pool for cleanup.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// This goroutine grabs a buffer, reads from the stream into the buffer,
	// then creates a goroutine to upload/stage the chunk.
	for chunkNum := uint32(0); true; chunkNum++ {
		var buffer T
		select {
		case buffer = <-buffers.Acquire():
			// got a buffer
		default:
			// no buffer available; allocate a new buffer if possible
			if _, err := buffers.Grow(); err != nil {
				return err
			}

			// either grab the newly allocated buffer or wait for one to become available
			buffer = <-buffers.Acquire()
		}

		// ReadFull fills the buffer unless the stream ends first, in which case it
		// returns io.EOF (nothing read) or io.ErrUnexpectedEOF (partial read).
		var n int
		n, err = io.ReadFull(src, buffer)

		if n > 0 {
			// some data was read, upload it
			wg.Add(1) // We're posting a buffer to be sent

			// NOTE: we must pass chunkNum as an arg to our goroutine else
			// it's captured by reference and can change underneath us!
			// (buffer and n are declared inside the loop body, so each iteration's
			// closure sees its own copies and needs no such treatment.)
			go func(chunkNum uint32) {
				// Upload the outgoing chunk, matching the number of bytes read
				offset := int64(chunkNum) * options.ChunkSize
				uploadRangeOptions := options.getUploadRangeOptions()
				_, err := dst.UploadRange(ctx, offset, streaming.NopCloser(bytes.NewReader(buffer[:n])), uploadRangeOptions)
				if err != nil {
					// errCh has capacity 1; only the first failure is recorded.
					select {
					case errCh <- err:
						// error was set
					default:
						// some other error is already set
					}
					cancel()
				}
				buffers.Release(buffer) // The goroutine reading from the stream can reuse this buffer now

				// signal that the chunk has been staged.
				// we MUST do this after attempting to write to errCh
				// to avoid it racing with the reading goroutine.
				wg.Done()
			}(chunkNum)
		} else {
			// nothing was read so the buffer is empty, send it back for reuse/clean-up.
			buffers.Release(buffer)
		}

		if err != nil { // The reader is done, no more outgoing buffers
			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
				// these are expected errors, we don't surface those
				err = nil
			} else {
				// some other error happened, terminate any outstanding uploads
				cancel()
			}
			break
		}
	}

	wg.Wait() // Wait for all outgoing chunks to complete

	if err != nil {
		// there was an error reading from src, favor this error over any error during staging
		return err
	}

	// safe to read errCh without selecting against wg: all writers have completed above.
	select {
	case err = <-errCh:
		// there was an error during staging
		return err
	default:
		// no error was encountered
	}

	// All chunks uploaded, return nil error
	return nil
}

// mmbPool implements the bufferManager interface.
// it uses anonymous memory mapped files for buffers.
// don't use this type directly, use newMMBPool() instead.
+type mmbPool struct { + buffers chan mmb + count int + max int + size int64 +} + +func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] { + return &mmbPool{ + buffers: make(chan mmb, maxBuffers), + max: maxBuffers, + size: bufferSize, + } +} + +func (pool *mmbPool) Acquire() <-chan mmb { + return pool.buffers +} + +func (pool *mmbPool) Grow() (int, error) { + if pool.count < pool.max { + buffer, err := newMMB(pool.size) + if err != nil { + return 0, err + } + pool.buffers <- buffer + pool.count++ + } + return pool.count, nil +} + +func (pool *mmbPool) Release(buffer mmb) { + pool.buffers <- buffer +} + +func (pool *mmbPool) Free() { + for i := 0; i < pool.count; i++ { + buffer := <-pool.buffers + buffer.delete() + } + pool.count = 0 +} diff --git a/sdk/storage/azfile/file/client.go b/sdk/storage/azfile/file/client.go new file mode 100644 index 000000000000..432f8ae379a1 --- /dev/null +++ b/sdk/storage/azfile/file/client.go @@ -0,0 +1,505 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
package file

import (
	"bytes"
	"context"
	"errors"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas"
	"io"
	"os"
	"strings"
	"sync"
	"time"
)

// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions base.ClientOptions

// Client represents a URL to the Azure Storage file.
type Client base.Client[generated.FileClient]

// NewClientWithNoCredential creates an instance of Client with the specified values.
// This is used to anonymously access a file or with a shared access signature (SAS) token.
//   - fileURL - the URL of the file e.g. https://<account>.file.core.windows.net/share/directoryPath/file?<sas token>
//   - options - client options; pass nil to accept the default values
//
// The directoryPath is optional in the fileURL. If omitted, it points to file within the specified share.
func NewClientWithNoCredential(fileURL string, options *ClientOptions) (*Client, error) {
	conOptions := shared.GetClientOptions(options)
	// No authentication policy is installed; access is granted by the URL itself (public or SAS).
	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)

	return (*Client)(base.NewFileClient(fileURL, pl, nil)), nil
}

// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
//   - fileURL - the URL of the file e.g. https://<account>.file.core.windows.net/share/directoryPath/file
//   - cred - a SharedKeyCredential created with the matching file's storage account and access key
//   - options - client options; pass nil to accept the default values
//
// The directoryPath is optional in the fileURL. If omitted, it points to file within the specified share.
func NewClientWithSharedKeyCredential(fileURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
	authPolicy := exported.NewSharedKeyCredPolicy(cred)
	conOptions := shared.GetClientOptions(options)
	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)

	return (*Client)(base.NewFileClient(fileURL, pl, cred)), nil
}

// NewClientFromConnectionString creates an instance of Client with the specified values.
//   - connectionString - a connection string for the desired storage account
//   - shareName - the name of the share within the storage account
//   - filePath - the path of the file within the share
//   - options - client options; pass nil to accept the default values
func NewClientFromConnectionString(connectionString string, shareName string, filePath string, options *ClientOptions) (*Client, error) {
	parsed, err := shared.ParseConnectionString(connectionString)
	if err != nil {
		return nil, err
	}

	// Normalize Windows-style separators so the URL path segment is well-formed.
	filePath = strings.ReplaceAll(filePath, "\\", "/")
	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, shareName, filePath)

	// Prefer shared-key auth when the connection string carries account credentials;
	// otherwise fall back to an unauthenticated client (e.g. SAS embedded in the URL).
	if parsed.AccountKey != "" && parsed.AccountName != "" {
		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
		if err != nil {
			return nil, err
		}
		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
	}

	return NewClientWithNoCredential(parsed.ServiceURL, options)
}

// generated returns the underlying generated-layer FileClient wrapped by this client.
func (f *Client) generated() *generated.FileClient {
	return base.InnerClient((*base.Client[generated.FileClient])(f))
}

// sharedKey returns the SharedKeyCredential the client was constructed with, or nil when none was supplied.
func (f *Client) sharedKey() *SharedKeyCredential {
	return base.SharedKey((*base.Client[generated.FileClient])(f))
}

// URL returns the URL endpoint used by the Client object.
func (f *Client) URL() string {
	return f.generated().Endpoint()
}

// Create operation creates a new file or replaces a file. Note it only initializes the file with no content.
//   - fileContentLength: Specifies the maximum size for the file in bytes, up to 4 TB.
//
// options may be nil to accept the defaults; the format() helpers used throughout this file
// appear to be written for that pattern (nil receiver) — confirm in the options' definitions.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-file.
func (f *Client) Create(ctx context.Context, fileContentLength int64, options *CreateOptions) (CreateResponse, error) {
	fileAttributes, fileCreationTime, fileLastWriteTime, fileCreateOptions, fileHTTPHeaders, leaseAccessConditions := options.format()
	resp, err := f.generated().Create(ctx, fileContentLength, fileAttributes, fileCreationTime, fileLastWriteTime, fileCreateOptions, fileHTTPHeaders, leaseAccessConditions)
	return resp, err
}

// Delete operation removes the file from the storage account.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/delete-file2.
func (f *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) {
	opts, leaseAccessConditions := options.format()
	resp, err := f.generated().Delete(ctx, opts, leaseAccessConditions)
	return resp, err
}

// GetProperties operation returns all user-defined metadata, standard HTTP properties, and system properties for the file.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-file-properties.
func (f *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) {
	opts, leaseAccessConditions := options.format()
	resp, err := f.generated().GetProperties(ctx, opts, leaseAccessConditions)
	return resp, err
}

// SetHTTPHeaders operation sets HTTP headers on the file.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-properties.
func (f *Client) SetHTTPHeaders(ctx context.Context, options *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) {
	fileAttributes, fileCreationTime, fileLastWriteTime, opts, fileHTTPHeaders, leaseAccessConditions := options.format()
	resp, err := f.generated().SetHTTPHeaders(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, opts, fileHTTPHeaders, leaseAccessConditions)
	return resp, err
}

// SetMetadata operation sets user-defined metadata for the specified file.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-metadata.
func (f *Client) SetMetadata(ctx context.Context, options *SetMetadataOptions) (SetMetadataResponse, error) {
	opts, leaseAccessConditions := options.format()
	resp, err := f.generated().SetMetadata(ctx, opts, leaseAccessConditions)
	return resp, err
}

// StartCopyFromURL operation copies the data at the source URL to a file.
//   - copySource: specifies the URL of the source file or blob, up to 2KiB in length.
//
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/copy-file.
func (f *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) {
	opts, copyFileSmbInfo, leaseAccessConditions := options.format()
	resp, err := f.generated().StartCopy(ctx, copySource, opts, copyFileSmbInfo, leaseAccessConditions)
	return resp, err
}

// AbortCopy operation cancels a pending Copy File operation, and leaves a destination file with zero length and full metadata.
//   - copyID: the copy identifier provided in the x-ms-copy-id header of the original Copy File operation.
//
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/abort-copy-file.
func (f *Client) AbortCopy(ctx context.Context, copyID string, options *AbortCopyOptions) (AbortCopyResponse, error) {
	opts, leaseAccessConditions := options.format()
	resp, err := f.generated().AbortCopy(ctx, copyID, opts, leaseAccessConditions)
	return resp, err
}

// Resize operation resizes the file to the specified size.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-properties.
func (f *Client) Resize(ctx context.Context, size int64, options *ResizeOptions) (ResizeResponse, error) {
	fileAttributes, fileCreationTime, fileLastWriteTime, opts, leaseAccessConditions := options.format(size)
	// Resize rides on the Set File Properties operation; fileHTTPHeaders is passed as nil
	// so the request carries the new content length without touching HTTP headers.
	resp, err := f.generated().SetHTTPHeaders(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, opts, nil, leaseAccessConditions)
	return resp, err
}

// UploadRange operation uploads a range of bytes to a file.
//   - offset: Specifies the start byte at which the range of bytes is to be written.
//   - body: Specifies the data to be uploaded.
//
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/put-range.
func (f *Client) UploadRange(ctx context.Context, offset int64, body io.ReadSeekCloser, options *UploadRangeOptions) (UploadRangeResponse, error) {
	// format derives the range parameter and content length from the offset and the body.
	rangeParam, contentLength, uploadRangeOptions, leaseAccessConditions, err := options.format(offset, body)
	if err != nil {
		return UploadRangeResponse{}, err
	}

	resp, err := f.generated().UploadRange(ctx, rangeParam, RangeWriteTypeUpdate, contentLength, body, uploadRangeOptions, leaseAccessConditions)
	return resp, err
}

// ClearRange operation clears the specified range and releases the space used in storage for that range.
//   - contentRange: Specifies the range of bytes to be cleared.
//
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/put-range.
func (f *Client) ClearRange(ctx context.Context, contentRange HTTPRange, options *ClearRangeOptions) (ClearRangeResponse, error) {
	rangeParam, leaseAccessConditions, err := options.format(contentRange)
	if err != nil {
		return ClearRangeResponse{}, err
	}

	// A clear request carries no body, hence content length 0 and nil body/options.
	resp, err := f.generated().UploadRange(ctx, rangeParam, RangeWriteTypeClear, 0, nil, nil, leaseAccessConditions)
	return resp, err
}

// UploadRangeFromURL operation uploads a range of bytes to a file where the contents are read from a URL.
//   - copySource: Specifies the URL of the source file or blob, up to 2 KB in length.
//   - destinationRange: Specifies the range of bytes in the file to be written.
//   - sourceRange: Bytes of source data in the specified range.
//
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/put-range-from-url.
func (f *Client) UploadRangeFromURL(ctx context.Context, copySource string, sourceOffset int64, destinationOffset int64, count int64, options *UploadRangeFromURLOptions) (UploadRangeFromURLResponse, error) {
	destRange, opts, sourceModifiedAccessConditions, leaseAccessConditions, err := options.format(sourceOffset, destinationOffset, count)
	if err != nil {
		return UploadRangeFromURLResponse{}, err
	}

	// NOTE(review): the literal 0 is the generated client's content-length parameter —
	// presumably zero because the payload is fetched server-side from copySource; confirm
	// against the generated client's signature.
	resp, err := f.generated().UploadRangeFromURL(ctx, destRange, copySource, 0, opts, sourceModifiedAccessConditions, leaseAccessConditions)
	return resp, err
}

// GetRangeList operation returns the list of valid ranges for a file.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-ranges.
func (f *Client) GetRangeList(ctx context.Context, options *GetRangeListOptions) (GetRangeListResponse, error) {
	opts, leaseAccessConditions := options.format()
	resp, err := f.generated().GetRangeList(ctx, opts, leaseAccessConditions)
	return resp, err
}

// ForceCloseHandles operation closes a handle or handles opened on a file.
//   - handleID - Specifies the handle ID to be closed. Use an asterisk (*) as a wildcard string to specify all handles.
//
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/force-close-handles.
func (f *Client) ForceCloseHandles(ctx context.Context, handleID string, options *ForceCloseHandlesOptions) (ForceCloseHandlesResponse, error) {
	opts := options.format()
	resp, err := f.generated().ForceCloseHandles(ctx, handleID, opts)
	return resp, err
}

// ListHandles operation returns a list of open handles on a file.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-handles.
func (f *Client) ListHandles(ctx context.Context, options *ListHandlesOptions) (ListHandlesResponse, error) {
	opts := options.format()
	resp, err := f.generated().ListHandles(ctx, opts)
	return resp, err
}

// GetSASURL is a convenience method for generating a SAS token for the currently pointed at file.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
func (f *Client) GetSASURL(permissions sas.FilePermissions, expiry time.Time, o *GetSASURLOptions) (string, error) {
	if f.sharedKey() == nil {
		return "", fileerror.MissingSharedKeyCredential
	}
	st := o.format()

	// Derive share name and file path from this client's URL for the signature values.
	urlParts, err := ParseURL(f.URL())
	if err != nil {
		return "", err
	}

	qps, err := sas.SignatureValues{
		Version:     sas.Version,
		Protocol:    sas.ProtocolHTTPS,
		ShareName:   urlParts.ShareName,
		FilePath:    urlParts.DirectoryOrFilePath,
		Permissions: permissions.String(),
		StartTime:   st,
		ExpiryTime:  expiry.UTC(),
	}.SignWithSharedKey(f.sharedKey())
	if err != nil {
		return "", err
	}

	// Append the signed query parameters to the file URL to form the SAS URL.
	endpoint := f.URL() + "?" + qps.Encode()

	return endpoint, nil
}

// Concurrent Upload Functions -----------------------------------------------------------------------------------------

// uploadFromReader uploads a buffer in chunks to an Azure file.
func (f *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) error {
	if actualSize > MaxFileSize {
		return errors.New("buffer is too large to upload to a file")
	}
	if o.ChunkSize == 0 {
		o.ChunkSize = MaxUpdateRangeBytes
	}

	// Emit a diagnostic event describing the transfer; a URL parse failure only suppresses the log.
	if log.Should(exported.EventUpload) {
		urlParts, err := ParseURL(f.URL())
		if err == nil {
			log.Writef(exported.EventUpload, "file name %s actual size %v chunk-size %v chunk-count %v",
				urlParts.DirectoryOrFilePath, actualSize, o.ChunkSize, ((actualSize-1)/o.ChunkSize)+1)
		}
	}

	// progress accumulates bytes transferred across all chunk goroutines; guarded by progressLock.
	progress := int64(0)
	progressLock := &sync.Mutex{}

	err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
		OperationName: "uploadFromReader",
		TransferSize:  actualSize,
		ChunkSize:     o.ChunkSize,
		Concurrency:   o.Concurrency,
		Operation: func(ctx context.Context, offset int64, chunkSize int64) error {
			// This function is called once per file range.
			// It is passed this file's offset within the buffer and its count of bytes
			// Prepare to read the proper range/section of the buffer
			if chunkSize < o.ChunkSize {
				// this is the last file range. Its actual size might be less
				// than the calculated size due to rounding up of the payload
				// size to fit in a whole number of chunks.
				chunkSize = actualSize - offset
			}
			var body io.ReadSeeker = io.NewSectionReader(reader, offset, chunkSize)
			if o.Progress != nil {
				chunkProgress := int64(0)
				body = streaming.NewRequestProgress(streaming.NopCloser(body),
					func(bytesTransferred int64) {
						diff := bytesTransferred - chunkProgress
						chunkProgress = bytesTransferred
						progressLock.Lock() // 1 goroutine at a time gets progress report
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}

			uploadRangeOptions := o.getUploadRangeOptions()
			_, err := f.UploadRange(ctx, offset, streaming.NopCloser(body), uploadRangeOptions)
			return err
		},
	})
	return err
}

// UploadBuffer uploads a buffer in chunks to an Azure file.
func (f *Client) UploadBuffer(ctx context.Context, buffer []byte, options *UploadBufferOptions) error {
	uploadOptions := uploadFromReaderOptions{}
	if options != nil {
		uploadOptions = *options
	}
	return f.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions)
}

// UploadFile uploads a file in chunks to an Azure file.
func (f *Client) UploadFile(ctx context.Context, file *os.File, options *UploadFileOptions) error {
	// Stat determines the transfer size up front.
	stat, err := file.Stat()
	if err != nil {
		return err
	}
	uploadOptions := uploadFromReaderOptions{}
	if options != nil {
		uploadOptions = *options
	}
	return f.uploadFromReader(ctx, file, stat.Size(), &uploadOptions)
}

// UploadStream copies the file held in io.Reader to the file at fileClient.
// A Context deadline or cancellation will cause this to error.
func (f *Client) UploadStream(ctx context.Context, body io.Reader, options *UploadStreamOptions) error {
	if options == nil {
		options = &UploadStreamOptions{}
	}

	// copyFromReader drives the concurrent chunked upload using memory-mapped buffers.
	err := copyFromReader(ctx, body, f, *options, newMMBPool)
	return err
}

// Concurrent Download Functions -----------------------------------------------------------------------------------------

// download method downloads an Azure file to a WriterAt in parallel.
func (f *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) {
	if o.ChunkSize == 0 {
		o.ChunkSize = DefaultDownloadChunkSize
	}

	count := o.Range.Count
	if count == CountToEnd { // If size not specified, calculate it
		// If we don't have the length at all, get it
		getFilePropertiesOptions := o.getFilePropertiesOptions()
		gr, err := f.GetProperties(ctx, getFilePropertiesOptions)
		if err != nil {
			return 0, err
		}
		count = *gr.ContentLength - o.Range.Offset
	}

	if count <= 0 {
		// The file is empty, there is nothing to download.
		return 0, nil
	}

	// Prepare and do parallel download.
	progress := int64(0)
	progressLock := &sync.Mutex{}

	err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
		OperationName: "downloadFileToWriterAt",
		TransferSize:  count,
		ChunkSize:     o.ChunkSize,
		Concurrency:   o.Concurrency,
		Operation: func(ctx context.Context, chunkStart int64, count int64) error {
			downloadFileOptions := o.getDownloadFileOptions(HTTPRange{
				Offset: chunkStart + o.Range.Offset,
				Count:  count,
			})
			dr, err := f.DownloadStream(ctx, downloadFileOptions)
			if err != nil {
				return err
			}
			// Wrap the body in a retry reader so transient mid-chunk failures are retried.
			var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerChunk)
			if o.Progress != nil {
				rangeProgress := int64(0)
				body = streaming.NewResponseProgress(
					body,
					func(bytesTransferred int64) {
						diff := bytesTransferred - rangeProgress
						rangeProgress = bytesTransferred
						progressLock.Lock() // serialize progress reports across chunk goroutines
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}
			// SectionWriter confines writes to this chunk's slot of the destination.
			_, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body)
			if err != nil {
				return err
			}
			err = body.Close()
			return err
		},
	})
	if err != nil {
		return 0, err
	}
	return count, nil
}

// DownloadStream operation reads or downloads a file from the system, including its metadata and properties.
// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-file.
func (f *Client) DownloadStream(ctx context.Context, options *DownloadStreamOptions) (DownloadStreamResponse, error) {
	// NOTE(review): format() is invoked before the nil check below, so it must tolerate a nil
	// receiver (the option-formatting pattern used throughout this file). Consider hoisting the
	// nil check above this call for clarity — confirm format's nil-receiver behavior first.
	opts, leaseAccessConditions := options.format()
	if options == nil {
		options = &DownloadStreamOptions{}
	}

	resp, err := f.generated().Download(ctx, opts, leaseAccessConditions)
	if err != nil {
		return DownloadStreamResponse{}, err
	}

	// Capture the request parameters so the response's retry reader can re-issue ranged reads.
	return DownloadStreamResponse{
		DownloadResponse:      resp,
		client:                f,
		getInfo:               httpGetterInfo{Range: options.Range},
		leaseAccessConditions: options.LeaseAccessConditions,
	}, err
}

// DownloadBuffer downloads an Azure file to a buffer with parallel.
func (f *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) {
	if o == nil {
		o = &DownloadBufferOptions{}
	}

	// BytesWriter adapts the caller's buffer to the io.WriterAt expected by download.
	return f.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o))
}

// DownloadFile downloads an Azure file to a local file.
// The file would be truncated if the size doesn't match.
func (f *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) {
	if o == nil {
		o = &DownloadFileOptions{}
	}
	do := (*downloadOptions)(o)

	// 1. Calculate the size of the destination file
	var size int64

	count := do.Range.Count
	if count == CountToEnd {
		// Try to get Azure file's size
		getFilePropertiesOptions := do.getFilePropertiesOptions()
		props, err := f.GetProperties(ctx, getFilePropertiesOptions)
		if err != nil {
			return 0, err
		}
		size = *props.ContentLength - do.Range.Offset
	} else {
		size = count
	}

	// 2. Compare and try to resize local file's size if it doesn't match Azure file's size.
	stat, err := file.Stat()
	if err != nil {
		return 0, err
	}
	if stat.Size() != size {
		if err = file.Truncate(size); err != nil {
			return 0, err
		}
	}

	if size > 0 {
		return f.download(ctx, file, *do)
	} else { // if the file's size is 0, there is no need in downloading it
		return 0, nil
	}
}
diff --git a/sdk/storage/azfile/file/client_test.go b/sdk/storage/azfile/file/client_test.go
new file mode 100644
index 000000000000..f04191104e1a
--- /dev/null
+++ b/sdk/storage/azfile/file/client_test.go
@@ -0,0 +1,3121 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
+ +package file_test + +import ( + "bytes" + "context" + "crypto/md5" + "crypto/rand" + "encoding/binary" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "hash/crc64" + "io" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running file Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &FileRecordedTestsSuite{}) + suite.Run(t, &FileUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &FileRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &FileRecordedTestsSuite{}) + } +} + +func (f *FileRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(f.T(), suite, test) +} + +func (f *FileRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(f.T(), suite, test) +} + +func (f *FileUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (f *FileUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type FileRecordedTestsSuite struct { + suite.Suite +} + +type 
FileUnrecordedTestsSuite struct { + suite.Suite +} + +func (f *FileRecordedTestsSuite) TestFileNewFileClient() { + _require := require.New(f.T()) + testName := f.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := shareClient.NewDirectoryClient(dirName) + + fileName := testcommon.GenerateFileName(testName) + fileClient := dirClient.NewFileClient(fileName) + + correctURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName + "/" + fileName + _require.Equal(fileClient.URL(), correctURL) + + rootFileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + + correctURL = "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + fileName + _require.Equal(rootFileClient.URL(), correctURL) +} + +func (f *FileRecordedTestsSuite) TestFileCreateUsingSharedKey() { + _require := require.New(f.T()) + testName := f.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + fileURL := "https://" + cred.AccountName() + ".file.core.windows.net/" + shareName + "/" + dirName + "/" + fileName + + options := &file.ClientOptions{} + 
testcommon.SetClientOptions(f.T(), &options.ClientOptions) + fileClient, err := file.NewClientWithSharedKeyCredential(fileURL, cred, options) + _require.NoError(err) + + // creating file where directory does not exist gives ParentNotFound error + _, err = fileClient.Create(context.Background(), 1024, nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ParentNotFound) + + testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + + resp, err := fileClient.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + _require.Equal(resp.FileLastWriteTime.IsZero(), false) + _require.Equal(resp.FileChangeTime.IsZero(), false) +} + +func (f *FileRecordedTestsSuite) TestFileCreateUsingConnectionString() { + _require := require.New(f.T()) + testName := f.T().Name() + + connString, err := testcommon.GetGenericConnectionString(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + fileName := testcommon.GenerateFileName(testName) + options := &file.ClientOptions{} + testcommon.SetClientOptions(f.T(), &options.ClientOptions) + fileClient1, err := file.NewClientFromConnectionString(*connString, shareName, fileName, options) + _require.NoError(err) + + resp, err := fileClient1.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), 
false) + _require.Equal(resp.FileCreationTime.IsZero(), false) + _require.Equal(resp.FileLastWriteTime.IsZero(), false) + _require.Equal(resp.FileChangeTime.IsZero(), false) + + filePath := dirName + "/" + fileName + fileClient2, err := file.NewClientFromConnectionString(*connString, shareName, filePath, options) + _require.NoError(err) + + _, err = fileClient2.Create(context.Background(), 1024, nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ParentNotFound) + + testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + + // using '\' as path separator + filePath = dirName + "\\" + fileName + fileClient3, err := file.NewClientFromConnectionString(*connString, shareName, filePath, options) + _require.NoError(err) + + resp, err = fileClient3.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) +} + +func (f *FileUnrecordedTestsSuite) TestFileClientUsingSAS() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirName := testcommon.GenerateDirectoryName(testName) + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, dirName, shareClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := dirClient.NewFileClient(fileName) + + permissions := sas.FilePermissions{ + Read: true, + Write: true, + Delete: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + + fileSASURL, err := fileClient.GetSASURL(permissions, expiry, nil) + _require.NoError(err) + + 
fileSASClient, err := file.NewClientWithNoCredential(fileSASURL, nil) + _require.NoError(err) + + _, err = fileSASClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) + + _, err = fileSASClient.Create(context.Background(), 1024, nil) + _require.NoError(err) + + resp, err := fileSASClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) + _require.Equal(resp.LastModified.IsZero(), false) + _require.Equal(resp.FileCreationTime.IsZero(), false) +} + +func (f *FileRecordedTestsSuite) TestFileCreateDeleteDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileName := testcommon.GenerateFileName(testName) + rootDirClient := shareClient.NewRootDirectoryClient() + _require.NoError(err) + + fClient := rootDirClient.NewFileClient(fileName) + + // Create and delete file in root directory. 
+ cResp, err := fClient.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.Version) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.IsServerEncrypted) + + delResp, err := fClient.Delete(context.Background(), nil) + _require.NoError(err) + _require.NotNil(delResp.RequestID) + _require.NotNil(delResp.Version) + _require.Equal(delResp.Date.IsZero(), false) + + dirClient := testcommon.CreateNewDirectory(context.Background(), _require, testcommon.GenerateDirectoryName(testName), shareClient) + + // Create and delete file in named directory. + afClient := dirClient.NewFileClient(fileName) + + cResp, err = afClient.Create(context.Background(), 1024, nil) + _require.NoError(err) + _require.NotNil(cResp.ETag) + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.Version) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.IsServerEncrypted) + + delResp, err = afClient.Delete(context.Background(), nil) + _require.NoError(err) + _require.NotNil(delResp.RequestID) + _require.NotNil(delResp.Version) + _require.Equal(delResp.Date.IsZero(), false) +} + +func (f *FileRecordedTestsSuite) TestFileCreateNonDefaultMetadataNonEmpty() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 1024, &file.CreateOptions{ + Metadata: testcommon.BasicMetadata, + }) + 
_require.NoError(err) + + resp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.Metadata, len(testcommon.BasicMetadata)) + for k, v := range resp.Metadata { + val := testcommon.BasicMetadata[strings.ToLower(k)] + _require.NotNil(val) + _require.Equal(*v, *val) + } +} + +func (f *FileRecordedTestsSuite) TestFileCreateNonDefaultHTTPHeaders() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + + httpHeaders := file.HTTPHeaders{ + ContentType: to.Ptr("my_type"), + ContentDisposition: to.Ptr("my_disposition"), + CacheControl: to.Ptr("control"), + ContentMD5: nil, + ContentLanguage: to.Ptr("my_language"), + ContentEncoding: to.Ptr("my_encoding"), + } + + _, err = fClient.Create(context.Background(), 1024, &file.CreateOptions{ + HTTPHeaders: &httpHeaders, + }) + _require.NoError(err) + + resp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp.ContentType, httpHeaders.ContentType) + _require.EqualValues(resp.ContentDisposition, httpHeaders.ContentDisposition) + _require.EqualValues(resp.CacheControl, httpHeaders.CacheControl) + _require.EqualValues(resp.ContentLanguage, httpHeaders.ContentLanguage) + _require.EqualValues(resp.ContentEncoding, httpHeaders.ContentEncoding) + _require.Nil(resp.ContentMD5) +} + +func (f *FileRecordedTestsSuite) TestFileCreateNegativeMetadataInvalid() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + 
_require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 1024, &file.CreateOptions{ + Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}, + HTTPHeaders: &file.HTTPHeaders{}, + }) + _require.Error(err) +} + +func (f *FileUnrecordedTestsSuite) TestFileGetSetPropertiesNonDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + md5Str := "MDAwMDAwMDA=" + testMd5 := []byte(md5Str) + + creationTime := time.Now().Add(-time.Hour) + lastWriteTime := time.Now().Add(-time.Minute * 15) + + options := &file.SetHTTPHeadersOptions{ + Permissions: &file.Permissions{Permission: &testcommon.SampleSDDL}, + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{Hidden: true}, + CreationTime: &creationTime, + LastWriteTime: &lastWriteTime, + }, + HTTPHeaders: &file.HTTPHeaders{ + ContentType: to.Ptr("text/html"), + ContentEncoding: to.Ptr("gzip"), + ContentLanguage: to.Ptr("en"), + ContentMD5: testMd5, + CacheControl: to.Ptr("no-transform"), + ContentDisposition: to.Ptr("attachment"), + }, + } + setResp, err := fClient.SetHTTPHeaders(context.Background(), options) + _require.NoError(err) + _require.NotNil(setResp.ETag) + 
_require.Equal(setResp.LastModified.IsZero(), false) + _require.NotNil(setResp.RequestID) + _require.NotNil(setResp.Version) + _require.Equal(setResp.Date.IsZero(), false) + _require.NotNil(setResp.IsServerEncrypted) + + getResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.Equal(*getResp.FileType, "File") + + _require.EqualValues(getResp.ContentType, options.HTTPHeaders.ContentType) + _require.EqualValues(getResp.ContentEncoding, options.HTTPHeaders.ContentEncoding) + _require.EqualValues(getResp.ContentLanguage, options.HTTPHeaders.ContentLanguage) + _require.EqualValues(getResp.ContentMD5, options.HTTPHeaders.ContentMD5) + _require.EqualValues(getResp.CacheControl, options.HTTPHeaders.CacheControl) + _require.EqualValues(getResp.ContentDisposition, options.HTTPHeaders.ContentDisposition) + _require.Equal(*getResp.ContentLength, int64(0)) + // We'll just ensure a permission exists, no need to test overlapping functionality. 
+ _require.NotEqual(getResp.FilePermissionKey, "") + _require.Equal(*getResp.FileAttributes, options.SMBProperties.Attributes.String()) + + _require.EqualValues((*getResp.FileCreationTime).Format(testcommon.ISO8601), creationTime.UTC().Format(testcommon.ISO8601)) + _require.EqualValues((*getResp.FileLastWriteTime).Format(testcommon.ISO8601), lastWriteTime.UTC().Format(testcommon.ISO8601)) + + _require.NotNil(getResp.ETag) + _require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) +} + +func (f *FileRecordedTestsSuite) TestFileGetSetPropertiesDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 0, shareClient) + + setResp, err := fClient.SetHTTPHeaders(context.Background(), nil) + _require.NoError(err) + _require.NotEqual(*setResp.ETag, "") + _require.Equal(setResp.LastModified.IsZero(), false) + _require.NotEqual(setResp.RequestID, "") + _require.NotEqual(setResp.Version, "") + _require.Equal(setResp.Date.IsZero(), false) + _require.NotNil(setResp.IsServerEncrypted) + + metadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: metadata, + }) + _require.NoError(err) + + // get properties on the share snapshot + getResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.Equal(*getResp.FileType, 
"File") + + _require.Nil(getResp.ContentType) + _require.Nil(getResp.ContentEncoding) + _require.Nil(getResp.ContentLanguage) + _require.Nil(getResp.ContentMD5) + _require.Nil(getResp.CacheControl) + _require.Nil(getResp.ContentDisposition) + _require.Equal(*getResp.ContentLength, int64(0)) + + _require.NotNil(getResp.ETag) + _require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) + _require.EqualValues(getResp.Metadata, metadata) +} + +func (f *FileRecordedTestsSuite) TestFilePreservePermissions() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, &file.CreateOptions{ + Permissions: &file.Permissions{ + Permission: &testcommon.SampleSDDL, + }, + }) + _require.NoError(err) + + // Grab the original perm key before we set file headers. + getResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + + pKey := getResp.FilePermissionKey + cTime := getResp.FileCreationTime + lwTime := getResp.FileLastWriteTime + attribs := getResp.FileAttributes + + md5Str := "MDAwMDAwMDA=" + testMd5 := []byte(md5Str) + + properties := file.SetHTTPHeadersOptions{ + HTTPHeaders: &file.HTTPHeaders{ + ContentType: to.Ptr("text/html"), + ContentEncoding: to.Ptr("gzip"), + ContentLanguage: to.Ptr("en"), + ContentMD5: testMd5, + CacheControl: to.Ptr("no-transform"), + ContentDisposition: to.Ptr("attachment"), + }, + // SMBProperties, when options are left nil, leads to preserving. 
+ SMBProperties: &file.SMBProperties{}, + } + + setResp, err := fClient.SetHTTPHeaders(context.Background(), &properties) + _require.NoError(err) + _require.NotNil(setResp.ETag) + _require.NotNil(setResp.RequestID) + _require.NotNil(setResp.LastModified) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.NotNil(setResp.Version) + _require.Equal(setResp.Date.IsZero(), false) + + getResp, err = fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(setResp.LastModified) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.Equal(*getResp.FileType, "File") + + _require.EqualValues(getResp.ContentType, properties.HTTPHeaders.ContentType) + _require.EqualValues(getResp.ContentEncoding, properties.HTTPHeaders.ContentEncoding) + _require.EqualValues(getResp.ContentLanguage, properties.HTTPHeaders.ContentLanguage) + _require.EqualValues(getResp.ContentMD5, properties.HTTPHeaders.ContentMD5) + _require.EqualValues(getResp.CacheControl, properties.HTTPHeaders.CacheControl) + _require.EqualValues(getResp.ContentDisposition, properties.HTTPHeaders.ContentDisposition) + _require.Equal(*getResp.ContentLength, int64(0)) + // Ensure that the permission key gets preserved + _require.EqualValues(getResp.FilePermissionKey, pKey) + _require.EqualValues(cTime, getResp.FileCreationTime) + _require.EqualValues(lwTime, getResp.FileLastWriteTime) + _require.EqualValues(attribs, getResp.FileAttributes) + + _require.NotNil(getResp.ETag) + _require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) +} + +func (f *FileRecordedTestsSuite) TestFileGetSetPropertiesSnapshot() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, 
testcommon.GenerateShareName(testName), svcClient) + defer func() { + _, err := shareClient.Delete(context.Background(), &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + _require.NoError(err) + }() + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + md5Str := "MDAwMDAwMDA=" + testMd5 := []byte(md5Str) + + fileSetHTTPHeadersOptions := file.SetHTTPHeadersOptions{ + HTTPHeaders: &file.HTTPHeaders{ + ContentType: to.Ptr("text/html"), + ContentEncoding: to.Ptr("gzip"), + ContentLanguage: to.Ptr("en"), + ContentMD5: testMd5, + CacheControl: to.Ptr("no-transform"), + ContentDisposition: to.Ptr("attachment"), + }, + } + setResp, err := fClient.SetHTTPHeaders(context.Background(), &fileSetHTTPHeadersOptions) + _require.NoError(err) + _require.NotEqual(*setResp.ETag, "") + _require.Equal(setResp.LastModified.IsZero(), false) + _require.NotEqual(setResp.RequestID, "") + _require.NotEqual(setResp.Version, "") + _require.Equal(setResp.Date.IsZero(), false) + _require.NotNil(setResp.IsServerEncrypted) + + metadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: metadata, + }) + _require.NoError(err) + + resp, err := shareClient.CreateSnapshot(context.Background(), &share.CreateSnapshotOptions{Metadata: map[string]*string{}}) + _require.NoError(err) + _require.NotNil(resp.Snapshot) + + // get properties on the share snapshot + getResp, err := fClient.GetProperties(context.Background(), &file.GetPropertiesOptions{ + ShareSnapshot: resp.Snapshot, + }) + _require.NoError(err) + _require.Equal(setResp.LastModified.IsZero(), false) + _require.Equal(*getResp.FileType, "File") + + _require.EqualValues(getResp.ContentType, fileSetHTTPHeadersOptions.HTTPHeaders.ContentType) + 
_require.EqualValues(getResp.ContentEncoding, fileSetHTTPHeadersOptions.HTTPHeaders.ContentEncoding) + _require.EqualValues(getResp.ContentLanguage, fileSetHTTPHeadersOptions.HTTPHeaders.ContentLanguage) + _require.EqualValues(getResp.ContentMD5, fileSetHTTPHeadersOptions.HTTPHeaders.ContentMD5) + _require.EqualValues(getResp.CacheControl, fileSetHTTPHeadersOptions.HTTPHeaders.CacheControl) + _require.EqualValues(getResp.ContentDisposition, fileSetHTTPHeadersOptions.HTTPHeaders.ContentDisposition) + _require.Equal(*getResp.ContentLength, int64(0)) + + _require.NotNil(getResp.ETag) + _require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) + _require.EqualValues(getResp.Metadata, metadata) +} + +func (f *FileRecordedTestsSuite) TestGetSetMetadataNonDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + metadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + setResp, err := fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: metadata, + }) + _require.NoError(err) + _require.NotNil(setResp.ETag) + _require.NotNil(setResp.RequestID) + _require.NotNil(setResp.Version) + _require.Equal(setResp.Date.IsZero(), false) + _require.NotNil(setResp.IsServerEncrypted) + + getResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(getResp.ETag) + 
_require.NotNil(getResp.RequestID) + _require.NotNil(getResp.Version) + _require.Equal(getResp.Date.IsZero(), false) + _require.NotNil(getResp.IsServerEncrypted) + _require.EqualValues(getResp.Metadata, metadata) +} + +func (f *FileRecordedTestsSuite) TestFileSetMetadataNil() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + md := map[string]*string{"Not": to.Ptr("nil")} + + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + + resp1, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp1.Metadata, md) + + _, err = fClient.SetMetadata(context.Background(), nil) + _require.NoError(err) + + resp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(resp2.Metadata, 0) +} + +func (f *FileRecordedTestsSuite) TestFileSetMetadataDefaultEmpty() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + 
_require.NoError(err) + + md := map[string]*string{"Not": to.Ptr("nil")} + + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + + resp1, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp1.Metadata, md) + + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: map[string]*string{}, + }) + _require.NoError(err) + + resp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(resp2.Metadata, 0) +} + +func (f *FileRecordedTestsSuite) TestFileSetMetadataInvalidField() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + _, err = fClient.SetMetadata(context.Background(), &file.SetMetadataOptions{ + Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}, + }) + _require.Error(err) +} + +func (f *FileRecordedTestsSuite) TestStartCopyDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + srcFile := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + destFile := 
shareClient.NewRootDirectoryClient().NewFileClient("dest" + testcommon.GenerateFileName(testName)) + + fileSize := int64(2048) + _, err = srcFile.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + contentR, srcContent := testcommon.GenerateData(int(fileSize)) + srcContentMD5 := md5.Sum(srcContent) + + _, err = srcFile.UploadRange(context.Background(), 0, contentR, nil) + _require.NoError(err) + + copyResp, err := destFile.StartCopyFromURL(context.Background(), srcFile.URL(), nil) + _require.NoError(err) + _require.NotNil(copyResp.ETag) + _require.Equal(copyResp.LastModified.IsZero(), false) + _require.NotNil(copyResp.RequestID) + _require.NotNil(copyResp.Version) + _require.Equal(copyResp.Date.IsZero(), false) + _require.NotEqual(copyResp.CopyStatus, "") + + time.Sleep(time.Duration(5) * time.Second) + + getResp, err := destFile.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(getResp.CopyID, copyResp.CopyID) + _require.NotEqual(*getResp.CopyStatus, "") + _require.Equal(*getResp.CopySource, srcFile.URL()) + _require.Equal(*getResp.CopyStatus, file.CopyStatusTypeSuccess) + + // Abort will fail after copy finished + _, err = destFile.AbortCopy(context.Background(), *copyResp.CopyID, nil) + _require.Error(err) + testcommon.ValidateHTTPErrorCode(_require, err, http.StatusConflict) + + // validate data copied + dResp, err := destFile.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{Offset: 0, Count: fileSize}, + RangeGetContentMD5: to.Ptr(true), + }) + _require.NoError(err) + + destContent, err := io.ReadAll(dResp.Body) + _require.NoError(err) + _require.EqualValues(srcContent, destContent) + _require.Equal(dResp.ContentMD5, srcContentMD5[:]) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyDestEmpty() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + 
_require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, "src"+testcommon.GenerateFileName(testName), shareClient) + copyFClient := testcommon.GetFileClientFromShare("dest"+testcommon.GenerateFileName(testName), shareClient) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), nil) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp, err := copyFClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + // Read the file data to verify the copy + data, err := ioutil.ReadAll(resp.Body) + defer func() { + err = resp.Body.Close() + _require.NoError(err) + }() + + _require.NoError(err) + _require.Equal(*resp.ContentLength, int64(len(testcommon.FileDefaultData))) + _require.Equal(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyMetadata() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + basicMetadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), 
&file.StartCopyFromURLOptions{Metadata: basicMetadata}) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp2.Metadata, basicMetadata) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyMetadataNil() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + basicMetadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + + // Have the destination start with metadata so we ensure the nil metadata passed later takes effect + _, err = copyFClient.Create(context.Background(), 0, &file.CreateOptions{Metadata: basicMetadata}) + _require.NoError(err) + + gResp, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(gResp.Metadata, basicMetadata) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), nil) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(resp2.Metadata, 0) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyMetadataEmpty() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, 
nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + basicMetadata := map[string]*string{ + "Foo": to.Ptr("Foovalue"), + "Bar": to.Ptr("Barvalue"), + } + + // Have the destination start with metadata so we ensure the nil metadata passed later takes effect + _, err = copyFClient.Create(context.Background(), 0, &file.CreateOptions{Metadata: basicMetadata}) + _require.NoError(err) + + gResp, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(gResp.Metadata, basicMetadata) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{Metadata: map[string]*string{}}) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(resp2.Metadata, 0) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyNegativeMetadataInvalidField() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := 
shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}, + }) + _require.Error(err) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySourceCreationTime() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 21:00:00 GMT 2023") + _require.NoError(err) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true, Hidden: true}, + CreationTime: to.Ptr(currTime.Add(5 * time.Minute)), + LastWriteTime: to.Ptr(currTime.Add(2 * time.Minute)), + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + CreationTime: file.SourceCopyFileCreationTime{}, + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := 
copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.NotEqualValues(resp2.FileAttributes, cResp.FileAttributes) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySourceProperties() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{System: true}, + CreationTime: to.Ptr(currTime.Add(1 * time.Minute)), + LastWriteTime: to.Ptr(currTime.Add(2 * time.Minute)), + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + CreationTime: file.SourceCopyFileCreationTime{}, + LastWriteTime: file.SourceCopyFileLastWriteTime{}, + Attributes: file.SourceCopyFileAttributes{}, + PermissionCopyMode: to.Ptr(file.PermissionCopyModeTypeSource), + }, + }) + _require.NoError(err) + + time.Sleep(4 * 
time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.EqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.EqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.EqualValues(resp2.FileAttributes, cResp.FileAttributes) + _require.EqualValues(resp2.FilePermissionKey, cResp.FilePermissionKey) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyDifferentProperties() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{System: true}, + CreationTime: to.Ptr(currTime.Add(1 * time.Minute)), + LastWriteTime: to.Ptr(currTime.Add(2 * time.Minute)), + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + destCreationTime := currTime.Add(5 * time.Minute) + destLastWriteTIme := currTime.Add(6 * time.Minute) + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + CreationTime: file.DestinationCopyFileCreationTime(destCreationTime), + 
LastWriteTime: file.DestinationCopyFileLastWriteTime(destLastWriteTIme), + Attributes: file.DestinationCopyFileAttributes{ReadOnly: true}, + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.EqualValues(*resp2.FileCreationTime, destCreationTime.UTC()) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.EqualValues(*resp2.FileLastWriteTime, destLastWriteTIme.UTC()) + _require.NotEqualValues(resp2.FileAttributes, cResp.FileAttributes) + _require.EqualValues(resp2.FilePermissionKey, cResp.FilePermissionKey) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyOverrideMode() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + Permissions: &file.Permissions{ + Permission: to.Ptr(testcommon.SampleSDDL), + }, + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + PermissionCopyMode: to.Ptr(file.PermissionCopyModeTypeOverride), + }, + }) + 
_require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.NotEqualValues(resp2.FilePermissionKey, cResp.FilePermissionKey) +} + +func (f *FileRecordedTestsSuite) TestNegativeFileStartCopyOverrideMode() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + // permission or permission key is required when the PermissionCopyMode is override. 
+ _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + PermissionCopyMode: to.Ptr(file.PermissionCopyModeTypeOverride), + }, + }) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.MissingRequiredHeader) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySetArchiveAttributeTrue() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true, Hidden: true}, + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + Attributes: file.DestinationCopyFileAttributes{System: true, ReadOnly: true}, + SetArchiveAttribute: to.Ptr(true), + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + 
_require.Contains(*resp2.FileAttributes, "Archive") +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySetArchiveAttributeFalse() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true, Hidden: true}, + }, + }) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + Attributes: file.DestinationCopyFileAttributes{System: true, ReadOnly: true}, + SetArchiveAttribute: to.Ptr(false), + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) + _require.NotContains(*resp2.FileAttributes, "Archive") +} + +func (f *FileRecordedTestsSuite) TestFileStartCopyDestReadOnly() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) 
+ _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true}, + }, + }) + _require.NoError(err) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), &file.StartCopyFromURLOptions{ + CopyFileSMBInfo: &file.CopyFileSMBInfo{ + IgnoreReadOnly: to.Ptr(true), + }, + }) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + resp2, err := copyFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotEqualValues(resp2.FileCreationTime, cResp.FileCreationTime) + _require.NotEqualValues(resp2.FileLastWriteTime, cResp.FileLastWriteTime) +} + +func (f *FileRecordedTestsSuite) TestNegativeFileStartCopyDestReadOnly() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + 
copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + cResp, err := fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + _require.NotNil(cResp.FileCreationTime) + _require.NotNil(cResp.FileLastWriteTime) + _require.NotNil(cResp.FileAttributes) + _require.NotNil(cResp.FilePermissionKey) + + _, err = copyFClient.Create(context.Background(), 0, &file.CreateOptions{ + SMBProperties: &file.SMBProperties{ + Attributes: &file.NTFSFileAttributes{ReadOnly: true}, + }, + }) + _require.NoError(err) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ReadOnlyAttribute) +} + +func (f *FileRecordedTestsSuite) TestFileStartCopySourceNonExistent() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient("dst" + testcommon.GenerateFileName(testName)) + + _, err = copyFClient.StartCopyFromURL(context.Background(), fClient.URL(), nil) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) +} + +func (f *FileUnrecordedTestsSuite) TestFileStartCopyUsingSASSrc() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, 
"src"+shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileName := testcommon.GenerateFileName(testName) + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, "src"+fileName, shareClient) + + fileURLWithSAS, err := fClient.GetSASURL(sas.FilePermissions{Read: true, Write: true, Create: true, Delete: true}, time.Now().Add(5*time.Minute).UTC(), nil) + _require.NoError(err) + + // Create a new share for the destination + copyShareClient := testcommon.CreateNewShare(context.Background(), _require, "dest"+shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, copyShareClient) + + copyFileClient := testcommon.GetFileClientFromShare("dst"+fileName, copyShareClient) + + _, err = copyFileClient.StartCopyFromURL(context.Background(), fileURLWithSAS, nil) + _require.NoError(err) + + time.Sleep(4 * time.Second) + + dResp, err := copyFileClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := ioutil.ReadAll(dResp.Body) + defer func() { + err = dResp.Body.Close() + _require.NoError(err) + }() + + _require.NoError(err) + _require.Equal(*dResp.ContentLength, int64(len(testcommon.FileDefaultData))) + _require.Equal(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileAbortCopyNoCopyStarted() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + copyFClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = copyFClient.AbortCopy(context.Background(), "copynotstarted", nil) + _require.Error(err) + 
testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidQueryParameterValue) +} + +func (f *FileRecordedTestsSuite) TestResizeFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 1234, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, int64(1234)) + + _, err = fClient.Resize(context.Background(), 4096, nil) + _require.NoError(err) + + gResp, err = fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, int64(4096)) +} + +func (f *FileRecordedTestsSuite) TestFileResizeZero() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 10, nil) + _require.NoError(err) + + _, err = fClient.Resize(context.Background(), 0, nil) + _require.NoError(err) + + resp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*resp.ContentLength, int64(0)) +} + +func (f *FileRecordedTestsSuite) 
TestFileResizeInvalidSizeNegative() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient("src" + testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + _, err = fClient.Resize(context.Background(), -4, nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.OutOfRangeInput) +} + +func (f *FileRecordedTestsSuite) TestNegativeFileSizeMoreThanShareQuota() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + var fileShareMaxQuota int32 = 1024 // share size in GiB which is 1TiB + var fileMaxAllowedSizeInBytes int64 = 4398046511104 // file size in bytes which is 4 TiB + + shareClient := testcommon.GetShareClient(testcommon.GenerateShareName(testName), svcClient) + _, err = shareClient.Create(context.Background(), &share.CreateOptions{ + Quota: &fileShareMaxQuota, + }) + _require.NoError(err) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileMaxAllowedSizeInBytes, &file.CreateOptions{ + HTTPHeaders: &file.HTTPHeaders{}, + }) + _require.Error(err) +} + +func (f *FileRecordedTestsSuite) TestCreateMaximumSizeFileShare() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + 
_require.NoError(err) + + var fileShareMaxQuota int32 = 5120 // share size in GiB which is 5TiB + var fileMaxAllowedSizeInBytes int64 = 4398046511104 // file size in bytes which is 4 TiB + + shareClient := testcommon.GetShareClient(testcommon.GenerateShareName(testName), svcClient) + _, err = shareClient.Create(context.Background(), &share.CreateOptions{ + Quota: &fileShareMaxQuota, + }) + _require.NoError(err) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileMaxAllowedSizeInBytes, &file.CreateOptions{ + HTTPHeaders: &file.HTTPHeaders{}, + }) + _require.NoError(err) +} + +func (f *FileRecordedTestsSuite) TestSASFileClientNoKey() { + _require := require.New(f.T()) + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + testName := f.T().Name() + shareName := testcommon.GenerateShareName(testName) + fileName := testcommon.GenerateFileName(testName) + fileClient, err := file.NewClientWithNoCredential(fmt.Sprintf("https://%s.file.core.windows.net/%v/%v", accountName, shareName, fileName), nil) + _require.NoError(err) + + permissions := sas.FilePermissions{ + Read: true, + Write: true, + Delete: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + + _, err = fileClient.GetSASURL(permissions, expiry, nil) + _require.Equal(err, fileerror.MissingSharedKeyCredential) +} + +func (f *FileRecordedTestsSuite) TestSASFileClientSignNegative() { + _require := require.New(f.T()) + accountName, accountKey := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + _require.Greater(len(accountKey), 0) + + cred, err := file.NewSharedKeyCredential(accountName, accountKey) + _require.NoError(err) + + testName := f.T().Name() + shareName := 
testcommon.GenerateShareName(testName) + fileName := testcommon.GenerateFileName(testName) + fileClient, err := file.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/%v%v", accountName, shareName, fileName), cred, nil) + _require.NoError(err) + + permissions := sas.FilePermissions{ + Read: true, + Write: true, + Delete: true, + Create: true, + } + expiry := time.Time{} + + // zero expiry time + _, err = fileClient.GetSASURL(permissions, expiry, &file.GetSASURLOptions{StartTime: to.Ptr(time.Now())}) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") + + // zero start and expiry time + _, err = fileClient.GetSASURL(permissions, expiry, &file.GetSASURLOptions{}) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") + + // empty permissions + _, err = fileClient.GetSASURL(sas.FilePermissions{}, expiry, nil) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") +} + +func (f *FileRecordedTestsSuite) TestFileUploadClearListRange() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 10 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + contentSize := 1024 * 2 // 2KB + contentR, contentD := testcommon.GenerateData(contentSize) + md5Value := 
md5.Sum(contentD) + contentMD5 := md5Value[:] + + uResp, err := fClient.UploadRange(context.Background(), 0, contentR, &file.UploadRangeOptions{ + TransactionalValidation: file.TransferValidationTypeMD5(contentMD5), + }) + _require.NoError(err) + _require.NotNil(uResp.ContentMD5) + _require.EqualValues(uResp.ContentMD5, contentMD5) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.EqualValues(*rangeList.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(int64(contentSize - 1))}) + + cResp, err := fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: int64(contentSize)}, nil) + _require.NoError(err) + _require.Nil(cResp.ContentMD5) + + rangeList2, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList2.Ranges, 0) +} + +func (f *FileUnrecordedTestsSuite) TestFileUploadRangeFromURL() { + _require := require.New(f.T()) + testName := f.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 20 + srcFileName := "src" + testcommon.GenerateFileName(testName) + srcFClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + contentSize := 1024 * 8 // 8KB + content := make([]byte, contentSize) + body := 
bytes.NewReader(content) + rsc := streaming.NopCloser(body) + contentCRC64 := crc64.Checksum(content, shared.CRC64Table) + + _, err = srcFClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + + perms := sas.FilePermissions{Read: true, Write: true} + sasQueryParams, err := sas.SignatureValues{ + Protocol: sas.ProtocolHTTPS, // Users MUST use HTTPS (not HTTP) + ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration + ShareName: shareName, + FilePath: srcFileName, + Permissions: perms.String(), + }.SignWithSharedKey(cred) + _require.NoError(err) + + srcFileSAS := srcFClient.URL() + "?" + sasQueryParams.Encode() + + destFClient := shareClient.NewRootDirectoryClient().NewFileClient("dest" + testcommon.GenerateFileName(testName)) + _, err = destFClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + uResp, err := destFClient.UploadRangeFromURL(context.Background(), srcFileSAS, 0, 0, int64(contentSize), &file.UploadRangeFromURLOptions{ + SourceContentCRC64: contentCRC64, + }) + _require.NoError(err) + _require.NotNil(uResp.XMSContentCRC64) + _require.EqualValues(binary.LittleEndian.Uint64(uResp.XMSContentCRC64), contentCRC64) + + rangeList, err := destFClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, int64(contentSize-1)) + + cResp, err := destFClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: int64(contentSize)}, nil) + _require.NoError(err) + _require.Nil(cResp.ContentMD5) + + rangeList2, err := destFClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList2.Ranges, 0) +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeFromURLNegative() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), 
testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 20 + srcFileName := "src" + testcommon.GenerateFileName(testName) + srcFClient := testcommon.CreateNewFileFromShare(context.Background(), _require, srcFileName, fileSize, shareClient) + + gResp, err := srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + contentSize := 1024 * 8 // 8KB + rsc, content := testcommon.GenerateData(contentSize) + contentCRC64 := crc64.Checksum(content, shared.CRC64Table) + + _, err = srcFClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + + destFClient := testcommon.CreateNewFileFromShare(context.Background(), _require, "dest"+testcommon.GenerateFileName(testName), fileSize, shareClient) + + _, err = destFClient.UploadRangeFromURL(context.Background(), srcFClient.URL(), 0, 0, int64(contentSize), &file.UploadRangeFromURLOptions{ + SourceContentCRC64: contentCRC64, + }) + _require.Error(err) +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeFromURLOffsetNegative() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 20 + srcFileName := "src" + testcommon.GenerateFileName(testName) + srcFClient := testcommon.CreateNewFileFromShare(context.Background(), _require, srcFileName, fileSize, shareClient) + + gResp, err := 
srcFClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + contentSize := 1024 * 8 // 8KB + destFClient := testcommon.CreateNewFileFromShare(context.Background(), _require, "dest"+testcommon.GenerateFileName(testName), fileSize, shareClient) + + // error is returned when source offset is negative + _, err = destFClient.UploadRangeFromURL(context.Background(), srcFClient.URL(), -1, 0, int64(contentSize), nil) + _require.Error(err) + _require.Equal(err.Error(), "invalid argument: source and destination offsets must be >= 0") +} + +func (f *FileUnrecordedTestsSuite) TestFileUploadRangeFromURLCopySourceAuthBlob() { + _require := require.New(f.T()) + testName := f.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + cred, err := testcommon.GetGenericTokenCredential() + _require.NoError(err) + + // Getting token + accessToken, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{Scopes: []string{"https://storage.azure.com/.default"}}) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 1024 * 10 + contentSize := 1024 * 8 // 8KB + _, content := testcommon.GenerateData(contentSize) + contentCRC64 := crc64.Checksum(content, shared.CRC64Table) + + // create source block blob + blobClient, err := azblob.NewClient("https://"+accountName+".blob.core.windows.net/", cred, nil) + _require.NoError(err) + + containerName := "goc" + testcommon.GenerateEntityName(testName) + blobName := "blob" + testcommon.GenerateEntityName(testName) + _, err = 
blobClient.CreateContainer(context.Background(), containerName, nil) + _require.NoError(err) + defer func() { + _, err := blobClient.DeleteContainer(context.Background(), containerName, nil) + _require.NoError(err) + }() + + _, err = blobClient.UploadBuffer(context.Background(), containerName, blobName, content, nil) + _require.NoError(err) + + destFClient := shareClient.NewRootDirectoryClient().NewFileClient("dest" + testcommon.GenerateFileName(testName)) + _, err = destFClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + blobURL := blobClient.ServiceClient().NewContainerClient(containerName).NewBlockBlobClient(blobName).URL() + uResp, err := destFClient.UploadRangeFromURL(context.Background(), blobURL, 0, 0, int64(contentSize), &file.UploadRangeFromURLOptions{ + SourceContentCRC64: contentCRC64, + CopySourceAuthorization: to.Ptr("Bearer " + accessToken.Token), + }) + _require.NoError(err) + _require.NotNil(uResp.XMSContentCRC64) + _require.EqualValues(binary.LittleEndian.Uint64(uResp.XMSContentCRC64), contentCRC64) + + // validate the content uploaded + dResp, err := destFClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{Offset: 0, Count: int64(contentSize)}, + }) + _require.NoError(err) + + data, err := ioutil.ReadAll(dResp.Body) + defer func() { + err = dResp.Body.Close() + _require.NoError(err) + }() + + _require.EqualValues(data, content) +} + +func (f *FileUnrecordedTestsSuite) TestFileUploadBuffer() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 100 * 1024 * 1024 + fClient := 
shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileUnrecordedTestsSuite) TestFileUploadFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 200 * 1024 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + 
+ gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + // create local file + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + err = ioutil.WriteFile("testFile", content, 0644) + _require.NoError(err) + + defer func() { + err = os.Remove("testFile") + _require.NoError(err) + }() + + fh, err := os.Open("testFile") + _require.NoError(err) + + defer func(fh *os.File) { + err := fh.Close() + _require.NoError(err) + }(fh) + + hash := md5.New() + _, err = io.Copy(hash, fh) + _require.NoError(err) + contentMD5 := hash.Sum(nil) + + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileUnrecordedTestsSuite) TestFileUploadStream() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 100 * 1024 * 1024 + fClient := 
shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadStream(context.Background(), streaming.NopCloser(bytes.NewReader(content)), &file.UploadStreamOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileUnrecordedTestsSuite) TestFileDownloadBuffer() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 100 * 1024 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), 
fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + destBuffer := make([]byte, fileSize) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 10 * 1024 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileUnrecordedTestsSuite) TestFileDownloadFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 100 * 1024 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := 
fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + _require.NoError(err) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 4 * 1024 * 1024, + }) + _require.NoError(err) + + destFileName := "BigFile-downloaded.bin" + destFile, err := os.Create(destFileName) + _require.NoError(err) + defer func(name string) { + err = os.Remove(name) + _require.NoError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + _require.NoError(err) + }(destFile) + + cnt, err := fClient.DownloadFile(context.Background(), destFile, &file.DownloadFileOptions{ + ChunkSize: 10 * 1024 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + hash := md5.New() + _, err = io.Copy(hash, destFile) + _require.NoError(err) + downloadedContentMD5 := hash.Sum(nil) + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestUploadDownloadDefaultNonDefaultMD5() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := 
testcommon.CreateNewFileFromShare(context.Background(), _require, "src"+testcommon.GenerateFileName(testName), 2048, shareClient) + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + contentR, contentD := testcommon.GenerateData(2048) + + pResp, err := fClient.UploadRange(context.Background(), 0, contentR, nil) + _require.NoError(err) + _require.NotNil(pResp.ContentMD5) + _require.NotNil(pResp.IsServerEncrypted) + _require.NotNil(pResp.ETag) + _require.Equal(pResp.LastModified.IsZero(), false) + _require.NotNil(pResp.RequestID) + _require.NotNil(pResp.Version) + _require.Equal(pResp.Date.IsZero(), false) + + // Get with rangeGetContentMD5 enabled. + // Partial data, check status code 206. + resp, err := fClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{Offset: 0, Count: 1024}, + RangeGetContentMD5: to.Ptr(true), + }) + _require.NoError(err) + _require.Equal(*resp.ContentLength, int64(1024)) + _require.NotNil(resp.ContentMD5) + _require.Equal(*resp.ContentType, "application/octet-stream") + + downloadedData, err := ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(downloadedData, contentD[:1024]) + + // Set ContentMD5 for the entire file. + _, err = fClient.SetHTTPHeaders(context.Background(), &file.SetHTTPHeadersOptions{ + HTTPHeaders: &file.HTTPHeaders{ + ContentMD5: pResp.ContentMD5, + ContentLanguage: to.Ptr("test")}, + }) + _require.NoError(err) + + // Test get with another type of range index, and validate if FileContentMD5 can be got correct. 
+ resp, err = fClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{Offset: 1024, Count: file.CountToEnd}, + }) + _require.NoError(err) + _require.Equal(*resp.ContentLength, int64(1024)) + _require.Nil(resp.ContentMD5) + _require.EqualValues(resp.FileContentMD5, pResp.ContentMD5) + _require.Equal(*resp.ContentLanguage, "test") + // Note: when downloading a range, the range's MD5 is returned only if rangeGetContentMD5=true; it is false here, so ContentMD5 should be empty + + downloadedData, err = ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(downloadedData, contentD[1024:]) + + _require.Equal(*resp.AcceptRanges, "bytes") + _require.Nil(resp.CacheControl) + _require.Nil(resp.ContentDisposition) + _require.Nil(resp.ContentEncoding) + _require.Equal(*resp.ContentRange, "bytes 1024-2047/2048") + _require.Nil(resp.ContentType) // Note ContentType is set to empty during SetHTTPHeaders + _require.Nil(resp.CopyID) + _require.Nil(resp.CopyProgress) + _require.Nil(resp.CopySource) + _require.Nil(resp.CopyStatus) + _require.Nil(resp.CopyStatusDescription) + _require.Equal(resp.Date.IsZero(), false) + _require.NotEqual(*resp.ETag, "") + _require.Equal(resp.LastModified.IsZero(), false) + _require.Nil(resp.Metadata) + _require.NotEqual(*resp.RequestID, "") + _require.NotEqual(*resp.Version, "") + _require.NotNil(resp.IsServerEncrypted) + + // Get entire fClient, check status code 200. + resp, err = fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + _require.Equal(*resp.ContentLength, int64(2048)) + _require.EqualValues(resp.ContentMD5, pResp.ContentMD5) // Note: This case is intended to get the entire file, so the entire file's MD5 will be returned. + _require.Nil(resp.FileContentMD5) // Note: FileContentMD5 is returned, only when range is specified explicitly. 
+ + downloadedData, err = ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(downloadedData, contentD[:]) + + _require.Equal(*resp.AcceptRanges, "bytes") + _require.Nil(resp.CacheControl) + _require.Nil(resp.ContentDisposition) + _require.Nil(resp.ContentEncoding) + _require.Nil(resp.ContentRange) // Note: ContentRange is returned, only when range is specified explicitly. + _require.Nil(resp.ContentType) + _require.Nil(resp.CopyCompletionTime) + _require.Nil(resp.CopyID) + _require.Nil(resp.CopyProgress) + _require.Nil(resp.CopySource) + _require.Nil(resp.CopyStatus) + _require.Nil(resp.CopyStatusDescription) + _require.Equal(resp.Date.IsZero(), false) + _require.NotEqual(*resp.ETag, "") + _require.Equal(resp.LastModified.IsZero(), false) + _require.Nil(resp.Metadata) + _require.NotEqual(*resp.RequestID, "") + _require.NotEqual(*resp.Version, "") + _require.NotNil(resp.IsServerEncrypted) +} + +func (f *FileRecordedTestsSuite) TestFileDownloadDataNonExistentFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.GetFileClientFromShare(testcommon.GenerateFileName(testName), shareClient) + + _, err = fClient.DownloadStream(context.Background(), nil) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) +} + +func (f *FileRecordedTestsSuite) TestFileDownloadDataOffsetOutOfRange() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), 
svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 0, shareClient) + + _, err = fClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{ + Offset: int64(len(testcommon.FileDefaultData)), + Count: file.CountToEnd, + }, + }) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidRange) +} + +func (f *FileRecordedTestsSuite) TestFileDownloadDataEntireFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, testcommon.GenerateFileName(testName), shareClient) + + resp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + // Specifying a count of 0 results in the value being ignored + data, err := ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileDownloadDataCountExact() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, testcommon.GenerateFileName(testName), shareClient) + + resp, err := 
fClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{ + Offset: 0, + Count: int64(len(testcommon.FileDefaultData)), + }, + }) + _require.NoError(err) + + data, err := ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileDownloadDataCountOutOfRange() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShareWithData(context.Background(), _require, testcommon.GenerateFileName(testName), shareClient) + + resp, err := fClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{ + Offset: 0, + Count: int64(len(testcommon.FileDefaultData)) * 2, + }, + }) + _require.NoError(err) + + data, err := ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(string(data), testcommon.FileDefaultData) +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeNilBody() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, "src"+testcommon.GenerateFileName(testName), 0, shareClient) + + _, err = fClient.UploadRange(context.Background(), 0, nil, nil) + _require.Error(err) + _require.Contains(err.Error(), "body 
must not be nil") +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeEmptyBody() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 0, shareClient) + + _, err = fClient.UploadRange(context.Background(), 0, streaming.NopCloser(bytes.NewReader([]byte{})), nil) + _require.Error(err) + _require.Contains(err.Error(), "body must contain readable data whose size is > 0") +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeNonExistentFile() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.GetFileClientFromShare(testcommon.GenerateFileName(testName), shareClient) + + rsc, _ := testcommon.GenerateData(12) + _, err = fClient.UploadRange(context.Background(), 0, rsc, nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeTransactionalMD5() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer 
testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + + contentR, contentD := testcommon.GenerateData(2048) + _md5 := md5.Sum(contentD) + + // Upload range with correct transactional MD5 + pResp, err := fClient.UploadRange(context.Background(), 0, contentR, &file.UploadRangeOptions{ + TransactionalValidation: file.TransferValidationTypeMD5(_md5[:]), + }) + _require.NoError(err) + _require.NotNil(pResp.ContentMD5) + _require.NotNil(pResp.ETag) + _require.Equal(pResp.LastModified.IsZero(), false) + _require.NotNil(pResp.RequestID) + _require.NotNil(pResp.Version) + _require.Equal(pResp.Date.IsZero(), false) + _require.EqualValues(pResp.ContentMD5, _md5[:]) + + // Upload range with empty MD5, nil MD5 is covered by other cases. + pResp, err = fClient.UploadRange(context.Background(), 1024, streaming.NopCloser(bytes.NewReader(contentD[1024:])), nil) + _require.NoError(err) + _require.NotNil(pResp.ContentMD5) + + resp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + _require.Equal(*resp.ContentLength, int64(2048)) + + downloadedData, err := ioutil.ReadAll(resp.Body) + _require.NoError(err) + _require.EqualValues(downloadedData, contentD[:]) +} + +func (f *FileRecordedTestsSuite) TestFileUploadRangeIncorrectTransactionalMD5() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + + contentR, _ := testcommon.GenerateData(2048) 
+ _, incorrectMD5 := testcommon.GenerateData(16) + + // Upload range with incorrect transactional MD5 + _, err = fClient.UploadRange(context.Background(), 0, contentR, &file.UploadRangeOptions{ + TransactionalValidation: file.TransferValidationTypeMD5(incorrectMD5[:]), + }) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.MD5Mismatch) +} + +// Testings for GetRangeList and ClearRange +func (f *FileRecordedTestsSuite) TestGetRangeListNonDefaultExact() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.GetFileClientFromShare(testcommon.GenerateFileName(testName), shareClient) + + fileSize := int64(5 * 1024) + _, err = fClient.Create(context.Background(), fileSize, &file.CreateOptions{HTTPHeaders: &file.HTTPHeaders{}}) + _require.NoError(err) + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + rsc, _ := testcommon.GenerateData(1024) + putResp, err := fClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + _require.Equal(putResp.LastModified.IsZero(), false) + _require.NotNil(putResp.ETag) + _require.NotNil(putResp.ContentMD5) + _require.NotNil(putResp.RequestID) + _require.NotNil(putResp.Version) + _require.Equal(putResp.Date.IsZero(), false) + + rangeList, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{ + Offset: 0, + Count: fileSize, + }, + }) + _require.NoError(err) + _require.Equal(rangeList.LastModified.IsZero(), false) + _require.NotNil(rangeList.ETag) + _require.Equal(*rangeList.FileContentLength, fileSize) + _require.NotNil(rangeList.RequestID) + 
_require.NotNil(rangeList.Version) + _require.Equal(rangeList.Date.IsZero(), false) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, int64(1023)) +} + +// Default means clear the entire file's range +func (f *FileRecordedTestsSuite) TestClearRangeDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + rsc, _ := testcommon.GenerateData(2048) + _, err = fClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + + _, err = fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: 2048}, nil) + _require.NoError(err) + + rangeList, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + }) + _require.NoError(err) + _require.Len(rangeList.Ranges, 0) +} + +func (f *FileRecordedTestsSuite) TestClearRangeNonDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 4096, shareClient) + 
defer testcommon.DeleteFile(context.Background(), _require, fClient) + + rsc, _ := testcommon.GenerateData(2048) + _, err = fClient.UploadRange(context.Background(), 2048, rsc, nil) + _require.NoError(err) + + _, err = fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 2048, Count: 2048}, nil) + _require.NoError(err) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 0) +} + +func (f *FileRecordedTestsSuite) TestClearRangeMultipleRanges() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + rsc, _ := testcommon.GenerateData(2048) + _, err = fClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + + _, err = fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 1024, Count: 1024}, nil) + _require.NoError(err) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.EqualValues(*rangeList.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(int64(1023))}) +} + +// When not 512 aligned, clear range will set 0 the non-512 aligned range, and will not eliminate the range. 
+func (f *FileRecordedTestsSuite) TestClearRangeNonDefaultCount() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 1, shareClient) + defer testcommon.DeleteFile(context.Background(), _require, fClient) + + d := []byte{65} + _, err = fClient.UploadRange(context.Background(), 0, streaming.NopCloser(bytes.NewReader(d)), nil) + _require.NoError(err) + + rangeList, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + }) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.EqualValues(*rangeList.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(int64(0))}) + + _, err = fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: 1}, nil) + _require.NoError(err) + + rangeList, err = fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + }) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.EqualValues(*rangeList.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(int64(0))}) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + _bytes, err := ioutil.ReadAll(dResp.Body) + _require.NoError(err) + _require.EqualValues(_bytes, []byte{0}) +} + +func (f *FileRecordedTestsSuite) TestFileClearRangeNegativeInvalidCount() { + _require := require.New(f.T()) + testName := f.T().Name() + svcClient, err := testcommon.GetServiceClient(f.T(), 
testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.GetShareClient(testcommon.GenerateShareName(testName), svcClient) + fClient := testcommon.GetFileClientFromShare(testcommon.GenerateFileName(testName), shareClient) + + _, err = fClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: 0}, nil) + _require.Error(err) + _require.Contains(err.Error(), "invalid argument: either offset is < 0 or count <= 0") +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListDefaultEmptyFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 0, shareClient) + + resp, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.Ranges, 0) +} + +func setupGetRangeListTest(_require *require.Assertions, testName string, fileSize int64, shareClient *share.Client) *file.Client { + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), fileSize, shareClient) + rsc, _ := testcommon.GenerateData(int(fileSize)) + _, err := fClient.UploadRange(context.Background(), 0, rsc, nil) + _require.NoError(err) + return fClient +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListDefaultRange() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + 
defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileSize := int64(512) + fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient) + + resp, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + }) + _require.NoError(err) + _require.Len(resp.Ranges, 1) + _require.EqualValues(*resp.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(fileSize - 1)}) +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListNonContiguousRanges() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileSize := int64(512) + fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient) + + _, err = fClient.Resize(context.Background(), fileSize*3, nil) + _require.NoError(err) + + rsc, _ := testcommon.GenerateData(int(fileSize)) + _, err = fClient.UploadRange(context.Background(), fileSize*2, rsc, nil) + _require.NoError(err) + + resp, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.Ranges, 2) + _require.EqualValues(*resp.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(fileSize - 1)}) + _require.EqualValues(*resp.Ranges[1], file.ShareFileRange{Start: to.Ptr(fileSize * 2), End: to.Ptr((fileSize * 3) - 1)}) +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListNonContiguousRangesCountLess() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileSize := int64(512) + fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient) + + resp, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: fileSize}, + }) + _require.NoError(err) + _require.Len(resp.Ranges, 1) + _require.EqualValues(int64(0), *(resp.Ranges[0].Start)) + _require.EqualValues(fileSize-1, *(resp.Ranges[0].End)) +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListNonContiguousRangesCountExceed() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fileSize := int64(512) + fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient) + + resp, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: fileSize + 1}, + }) + _require.NoError(err) + _require.NoError(err) + _require.Len(resp.Ranges, 1) + _require.EqualValues(*resp.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(fileSize - 1)}) +} + +func (f *FileRecordedTestsSuite) TestFileGetRangeListSnapshot() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer func() { + _, err := shareClient.Delete(context.Background(), 
&share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + _require.NoError(err) + }() + + fileSize := int64(512) + fClient := setupGetRangeListTest(_require, testName, fileSize, shareClient) + + resp, _ := shareClient.CreateSnapshot(context.Background(), nil) + _require.NotNil(resp.Snapshot) + + resp2, err := fClient.GetRangeList(context.Background(), &file.GetRangeListOptions{ + Range: file.HTTPRange{Offset: 0, Count: file.CountToEnd}, + ShareSnapshot: resp.Snapshot, + }) + _require.NoError(err) + _require.Len(resp2.Ranges, 1) + _require.EqualValues(*resp2.Ranges[0], file.ShareFileRange{Start: to.Ptr(int64(0)), End: to.Ptr(fileSize - 1)}) +} + +func (f *FileRecordedTestsSuite) TestFileUploadDownloadSmallBuffer() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 10 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + destBuffer := make([]byte, fileSize) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 2 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + 
_require.Equal(cnt, fileSize) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestFileUploadDownloadSmallFile() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 10 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + // create local file + _, content := testcommon.GenerateData(int(fileSize)) + srcFileName := "testFileUpload" + err = ioutil.WriteFile(srcFileName, content, 0644) + _require.NoError(err) + defer func() { + err = os.Remove(srcFileName) + _require.NoError(err) + }() + fh, err := os.Open(srcFileName) + _require.NoError(err) + defer func(fh *os.File) { + err := fh.Close() + _require.NoError(err) + }(fh) + + srcHash := md5.New() + _, err = io.Copy(srcHash, fh) + _require.NoError(err) + contentMD5 := srcHash.Sum(nil) + + err = fClient.UploadFile(context.Background(), fh, &file.UploadFileOptions{ + 
Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + destFileName := "SmallFile-downloaded.bin" + destFile, err := os.Create(destFileName) + _require.NoError(err) + defer func(name string) { + err = os.Remove(name) + _require.NoError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + _require.NoError(err) + }(destFile) + + cnt, err := fClient.DownloadFile(context.Background(), destFile, &file.DownloadFileOptions{ + ChunkSize: 2 * 1024, + Concurrency: 5, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + + destHash := md5.New() + _, err = io.Copy(destHash, destFile) + _require.NoError(err) + downloadedContentMD5 := destHash.Sum(nil) + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestFileUploadDownloadSmallStream() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 10 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + _, content := 
testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + err = fClient.UploadStream(context.Background(), streaming.NopCloser(bytes.NewReader(content)), &file.UploadStreamOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + }) + _require.NoError(err) + + dResp, err := fClient.DownloadStream(context.Background(), nil) + _require.NoError(err) + + data, err := io.ReadAll(dResp.Body) + _require.NoError(err) + + downloadedMD5Value := md5.Sum(data) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestFileUploadDownloadWithProgress() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + var fileSize int64 = 10 * 1024 + fClient := shareClient.NewRootDirectoryClient().NewFileClient(testcommon.GenerateFileName(testName)) + _, err = fClient.Create(context.Background(), fileSize, nil) + _require.NoError(err) + + gResp, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp.ContentLength, fileSize) + + _, content := testcommon.GenerateData(int(fileSize)) + md5Value := md5.Sum(content) + contentMD5 := md5Value[:] + + bytesUploaded := int64(0) + err = 
fClient.UploadBuffer(context.Background(), content, &file.UploadBufferOptions{ + Concurrency: 5, + ChunkSize: 2 * 1024, + Progress: func(bytesTransferred int64) { + _require.GreaterOrEqual(bytesTransferred, bytesUploaded) + bytesUploaded = bytesTransferred + }, + }) + _require.NoError(err) + _require.Equal(bytesUploaded, fileSize) + + destBuffer := make([]byte, fileSize) + bytesDownloaded := int64(0) + cnt, err := fClient.DownloadBuffer(context.Background(), destBuffer, &file.DownloadBufferOptions{ + ChunkSize: 2 * 1024, + Concurrency: 5, + Progress: func(bytesTransferred int64) { + _require.GreaterOrEqual(bytesTransferred, bytesDownloaded) + bytesDownloaded = bytesTransferred + }, + }) + _require.NoError(err) + _require.Equal(cnt, fileSize) + _require.Equal(bytesDownloaded, fileSize) + + downloadedMD5Value := md5.Sum(destBuffer) + downloadedContentMD5 := downloadedMD5Value[:] + + _require.EqualValues(downloadedContentMD5, contentMD5) + + gResp2, err := fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(*gResp2.ContentLength, fileSize) + + rangeList, err := fClient.GetRangeList(context.Background(), nil) + _require.NoError(err) + _require.Len(rangeList.Ranges, 1) + _require.Equal(*rangeList.Ranges[0].Start, int64(0)) + _require.Equal(*rangeList.Ranges[0].End, fileSize-1) +} + +func (f *FileRecordedTestsSuite) TestFileListHandlesDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + + resp, err := fClient.ListHandles(context.Background(), nil) + 
_require.NoError(err) + _require.Len(resp.Handles, 0) + _require.NotNil(resp.NextMarker) + _require.Equal(*resp.NextMarker, "") +} + +func (f *FileRecordedTestsSuite) TestFileForceCloseHandlesDefault() { + _require := require.New(f.T()) + testName := f.T().Name() + + svcClient, err := testcommon.GetServiceClient(f.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + fClient := testcommon.CreateNewFileFromShare(context.Background(), _require, testcommon.GenerateFileName(testName), 2048, shareClient) + + resp, err := fClient.ForceCloseHandles(context.Background(), "*", nil) + _require.NoError(err) + _require.EqualValues(*resp.NumberOfHandlesClosed, 0) + _require.EqualValues(*resp.NumberOfHandlesFailedToClose, 0) + _require.Nil(resp.Marker) +} + +// TODO: Add tests for retry header options + +// TODO: fix links in README: source, file_error, samples diff --git a/sdk/storage/azfile/file/constants.go b/sdk/storage/azfile/file/constants.go new file mode 100644 index 000000000000..c5687bd1b3b5 --- /dev/null +++ b/sdk/storage/azfile/file/constants.go @@ -0,0 +1,78 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" +) + +const ( + _1MiB = 1024 * 1024 + CountToEnd = 0 + + // MaxUpdateRangeBytes indicates the maximum number of bytes that can be updated in a call to Client.UploadRange. + MaxUpdateRangeBytes = 4 * 1024 * 1024 // 4MiB + + // MaxFileSize indicates the maximum size of the file allowed. 
+ MaxFileSize = 4 * 1024 * 1024 * 1024 * 1024 // 4 TiB + + // DefaultDownloadChunkSize is default chunk size + DefaultDownloadChunkSize = int64(4 * 1024 * 1024) // 4MiB +) + +// CopyStatusType defines the states of the copy operation. +type CopyStatusType = generated.CopyStatusType + +const ( + CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending + CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess + CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted + CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return generated.PossibleCopyStatusTypeValues() +} + +// PermissionCopyModeType determines the copy behavior of the security descriptor of the file. +// - source: The security descriptor on the destination file is copied from the source file. +// - override: The security descriptor on the destination file is determined via the x-ms-file-permission or x-ms-file-permission-key header. +type PermissionCopyModeType = generated.PermissionCopyModeType + +const ( + PermissionCopyModeTypeSource PermissionCopyModeType = generated.PermissionCopyModeTypeSource + PermissionCopyModeTypeOverride PermissionCopyModeType = generated.PermissionCopyModeTypeOverride +) + +// PossiblePermissionCopyModeTypeValues returns the possible values for the PermissionCopyModeType const type. +func PossiblePermissionCopyModeTypeValues() []PermissionCopyModeType { + return generated.PossiblePermissionCopyModeTypeValues() +} + +// RangeWriteType represents one of the following options. +// - update: Writes the bytes specified by the request body into the specified range. The Range and Content-Length headers must match to perform the update. +// - clear: Clears the specified range and releases the space used in storage for that range. 
To clear a range, set the Content-Length header to zero, +// and set the Range header to a value that indicates the range to clear, up to maximum file size. +type RangeWriteType = generated.FileRangeWriteType + +const ( + RangeWriteTypeUpdate RangeWriteType = generated.FileRangeWriteTypeUpdate + RangeWriteTypeClear RangeWriteType = generated.FileRangeWriteTypeClear +) + +// PossibleRangeWriteTypeValues returns the possible values for the RangeWriteType const type. +func PossibleRangeWriteTypeValues() []RangeWriteType { + return generated.PossibleFileRangeWriteTypeValues() +} + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType = exported.TransferValidationType + +// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. +type TransferValidationTypeMD5 = exported.TransferValidationTypeMD5 diff --git a/sdk/storage/azfile/file/examples_test.go b/sdk/storage/azfile/file/examples_test.go new file mode 100644 index 000000000000..b54aa5aab215 --- /dev/null +++ b/sdk/storage/azfile/file/examples_test.go @@ -0,0 +1,650 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package file_test + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "io" + "io/ioutil" + "log" + "os" + "strings" + "time" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +const random64BString string = "2SDgZj6RkKYzJpu04sweQek4uWHO8ndPnYlZ0tnFS61hjnFZ5IkvIGGY44eKABov" + +func generateData(sizeInBytes int) (io.ReadSeekCloser, []byte) { + data := make([]byte, sizeInBytes) + _len := len(random64BString) + if sizeInBytes > _len { + count := sizeInBytes / _len + if sizeInBytes%_len != 0 { + count = count + 1 + } + copy(data[:], strings.Repeat(random64BString, count)) + } else { + copy(data[:], random64BString) + } + return streaming.NopCloser(bytes.NewReader(data)), data +} + +func Example_client_NewClient_CreateShare_CreateDir_CreateFile() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + client, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareClient := client.NewShareClient("testShare") + fmt.Println(shareClient.URL()) + + dirClient := shareClient.NewDirectoryClient("testDir") + fmt.Println(dirClient.URL()) + + fileClient := dirClient.NewFileClient("testFile") + 
fmt.Println(fileClient.URL()) + +} + +func Example_file_NewClientFromConnectionString() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + filePath := "testDir/testFile" + fileClient, err := file.NewClientFromConnectionString(connectionString, shareName, filePath, nil) + handleError(err) + fmt.Println(fileClient.URL()) +} + +func Example_fileClient_CreateAndDelete() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_GetProperties() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + _, err = fileClient.GetProperties(context.Background(), nil) + handleError(err) + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) + +} + +func Example_fileClient_SetAndGetMetadata() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + opts := file.SetMetadataOptions{Metadata: map[string]*string{"hello": to.Ptr("world")}} + _, err = fileClient.SetMetadata(context.Background(), &opts) + handleError(err) + + get, err := fileClient.GetProperties(context.Background(), nil) + handleError(err) + + if get.Metadata == nil { + log.Fatal("No metadata returned") + } + for k, v := range get.Metadata { + fmt.Print(k + "=" + *v + "\n") + } + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) 
+ + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadBuffer() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + data := []byte{'h', 'e', 'l', 'l', 'o'} + err = fileClient.UploadBuffer(context.Background(), data, nil) + handleError(err) + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadStream() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + err = fileClient.UploadStream( + context.TODO(), + streaming.NopCloser(strings.NewReader("Some text")), + nil, + ) + handleError(err) + + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadAndClearRange() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + fileName := "testFile" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(context.Background(), 5, nil) + handleError(err) + + contentR, _ := generateData(5) + + _, err = fileClient.UploadRange(context.Background(), 0, contentR, nil) + handleError(err) + + rangeList, err := fileClient.GetRangeList(context.Background(), nil) + handleError(err) + fmt.Println(rangeList.Ranges) + + _, err = fileClient.ClearRange(context.Background(), file.HTTPRange{Offset: 0, Count: int64(5)}, nil) + handleError(err) + + rangeList2, err := 
fileClient.GetRangeList(context.Background(), nil) + handleError(err) + + fmt.Println(rangeList2.Ranges, 0) + _, err = fileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_StartCopyFromURL() { + // Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + dstFileName := "testFile2" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + dstFileClient := shareClient.NewRootDirectoryClient().NewFileClient(dstFileName) + + contentR, _ := generateData(int(fileSize)) + + _, err = srcFileClient.UploadRange(context.Background(), 0, contentR, nil) + handleError(err) + + // you can also use AbortCopy to abort copying + _, err = dstFileClient.StartCopyFromURL(context.Background(), srcFileClient.URL(), nil) + handleError(err) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = dstFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_DownloadStream() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + contentR, _ := generateData(int(fileSize)) + + _, err = srcFileClient.UploadRange(context.Background(), 0, contentR, nil) + handleError(err) + + // validate data copied + resp, err := srcFileClient.DownloadStream(context.Background(), &file.DownloadStreamOptions{ + Range: file.HTTPRange{Offset: 0, Count: fileSize}, + }) + handleError(err) + + content1, err := io.ReadAll(resp.Body) + handleError(err) + fmt.Println(content1) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_DownloadBuffer() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + handleError(err) + + err = srcFileClient.UploadBuffer(context.Background(), content, nil) + handleError(err) + + destBuffer := make([]byte, fileSize) + _, err = srcFileClient.DownloadBuffer(context.Background(), destBuffer, nil) + handleError(err) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_DownloadFile() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + content := make([]byte, fileSize) + _, err = rand.Read(content) + handleError(err) + + err = srcFileClient.UploadBuffer(context.Background(), content, nil) + handleError(err) + + destFileName := "file.bin" + destFile, err := os.Create(destFileName) + handleError(err) + defer func(name string) { + err = os.Remove(name) + handleError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + handleError(err) + }(destFile) + + _, err = srcFileClient.DownloadFile(context.Background(), destFile, nil) + handleError(err) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadFile() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + _, content := generateData(int(fileSize)) + err = ioutil.WriteFile(srcFileName, content, 0644) + handleError(err) + defer func() { + err = os.Remove(srcFileName) + handleError(err) + }() + fh, err := os.Open(srcFileName) + handleError(err) + defer func(fh *os.File) { + err := fh.Close() + handleError(err) + }(fh) + + err = srcFileClient.UploadFile(context.Background(), fh, nil) + + destFileName := "file.bin" + destFile, err := os.Create(destFileName) + handleError(err) + defer func(name string) { + err = os.Remove(name) + handleError(err) + }(destFileName) + defer func(destFile *os.File) { + err = destFile.Close() + handleError(err) + }(destFile) + + _, err = srcFileClient.DownloadFile(context.Background(), destFile, nil) + handleError(err) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_file_ClientGetSASURL() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + permission := sas.FilePermissions{Read: true} + start := time.Now() + expiry := start.AddDate(1, 0, 0) + options := file.GetSASURLOptions{StartTime: &start} + sasURL, err := srcFileClient.GetSASURL(permission, expiry, &options) + handleError(err) + _ = sasURL + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_Resize() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + shareName := "testShare" + srcFileName := "testFile" + fileSize := int64(5) + + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + resp1, err := srcFileClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println(*resp1.ContentLength) + + _, err = srcFileClient.Resize(context.Background(), 6, nil) + handleError(err) + + resp1, err = srcFileClient.GetProperties(context.Background(), nil) + handleError(err) + fmt.Println(*resp1.ContentLength) + + _, err = srcFileClient.Delete(context.Background(), nil) + handleError(err) + + _, err = shareClient.Delete(context.Background(), nil) + handleError(err) +} + +func Example_fileClient_UploadRangeFromURL() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + client, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareName := "testShare" + srcFileName := "testFile" + dstFileName := "testFile2" + fileSize := int64(5) + + shareClient := client.NewShareClient(shareName) + _, err = shareClient.Create(context.Background(), nil) + handleError(err) + + srcFileClient := 
shareClient.NewRootDirectoryClient().NewFileClient(srcFileName) + _, err = srcFileClient.Create(context.Background(), fileSize, nil) + handleError(err) + + contentR, _ := generateData(int(fileSize)) + + _, err = srcFileClient.UploadRange(context.Background(), 0, contentR, nil) + handleError(err) + + contentSize := 1024 * 8 // 8KB + content := make([]byte, contentSize) + body := bytes.NewReader(content) + rsc := streaming.NopCloser(body) + + _, err = srcFileClient.UploadRange(context.Background(), 0, rsc, nil) + handleError(err) + + perms := sas.FilePermissions{Read: true, Write: true} + sasQueryParams, err := sas.SignatureValues{ + Protocol: sas.ProtocolHTTPS, // Users MUST use HTTPS (not HTTP) + ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration + ShareName: shareName, + FilePath: srcFileName, + Permissions: perms.String(), + }.SignWithSharedKey(cred) + handleError(err) + + srcFileSAS := srcFileClient.URL() + "?" + sasQueryParams.Encode() + + destFClient := shareClient.NewRootDirectoryClient().NewFileClient(dstFileName) + _, err = destFClient.Create(context.Background(), fileSize, nil) + handleError(err) + + _, err = destFClient.UploadRangeFromURL(context.Background(), srcFileSAS, 0, 0, int64(contentSize), nil) + handleError(err) +} diff --git a/sdk/storage/azfile/file/mmf_unix.go b/sdk/storage/azfile/file/mmf_unix.go new file mode 100644 index 000000000000..dc17528e6516 --- /dev/null +++ b/sdk/storage/azfile/file/mmf_unix.go @@ -0,0 +1,38 @@ +//go:build go1.18 && (linux || darwin || freebsd || openbsd || netbsd || solaris) +// +build go1.18 +// +build linux darwin freebsd openbsd netbsd solaris + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package file + +import ( + "fmt" + "os" + "syscall" +) + +// mmb is a memory mapped buffer +type mmb []byte + +// newMMB creates a new memory mapped buffer with the specified size +func newMMB(size int64) (mmb, error) { + prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE + addr, err := syscall.Mmap(-1, 0, int(size), prot, flags) + if err != nil { + return nil, os.NewSyscallError("Mmap", err) + } + return mmb(addr), nil +} + +// delete cleans up the memory mapped buffer +func (m *mmb) delete() { + err := syscall.Munmap(*m) + *m = nil + if err != nil { + // if we get here, there is likely memory corruption. + // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("Munmap error: %v", err)) + } +} diff --git a/sdk/storage/azfile/file/mmf_windows.go b/sdk/storage/azfile/file/mmf_windows.go new file mode 100644 index 000000000000..b59e6b415776 --- /dev/null +++ b/sdk/storage/azfile/file/mmf_windows.go @@ -0,0 +1,56 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package file + +import ( + "fmt" + "os" + "reflect" + "syscall" + "unsafe" +) + +// mmb is a memory mapped buffer +type mmb []byte + +// newMMB creates a new memory mapped buffer with the specified size +func newMMB(size int64) (mmb, error) { + const InvalidHandleValue = ^uintptr(0) // -1 + + prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) + hMMF, err := syscall.CreateFileMapping(syscall.Handle(InvalidHandleValue), nil, prot, uint32(size>>32), uint32(size&0xffffffff), nil) + if err != nil { + return nil, os.NewSyscallError("CreateFileMapping", err) + } + defer func() { + _ = syscall.CloseHandle(hMMF) + }() + + addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size)) + if err != nil { + return nil, os.NewSyscallError("MapViewOfFile", err) + } + + m := mmb{} + h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) + h.Data = addr + h.Len = int(size) + h.Cap = h.Len + return m, nil +} + +// delete cleans up the memory mapped buffer +func (m *mmb) delete() { + addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) + *m = mmb{} + err := syscall.UnmapViewOfFile(addr) + if err != nil { + // if we get here, there is likely memory corruption. + // please open an issue https://github.com/Azure/azure-sdk-for-go/issues + panic(fmt.Sprintf("UnmapViewOfFile error: %v", err)) + } +} diff --git a/sdk/storage/azfile/file/models.go b/sdk/storage/azfile/file/models.go new file mode 100644 index 000000000000..f27195800f02 --- /dev/null +++ b/sdk/storage/azfile/file/models.go @@ -0,0 +1,743 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package file + +import ( + "encoding/binary" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "io" + "time" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// SMBProperties contains the optional parameters regarding the SMB/NTFS properties for a file. +type SMBProperties = exported.SMBProperties + +// NTFSFileAttributes for Files and Directories. +// The subset of attributes is listed at: https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-properties#file-system-attributes. +type NTFSFileAttributes = exported.NTFSFileAttributes + +// Permissions contains the optional parameters for the permissions on the file. +type Permissions = exported.Permissions + +// HTTPHeaders contains optional parameters for the Client.Create method. +type HTTPHeaders = generated.ShareFileHTTPHeaders + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// SourceModifiedAccessConditions contains a group of parameters for the FileClient.UploadRangeFromURL method. 
+type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions
+
+// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
+// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
+// which has an offset and a zero-value count indicates the range from the offset to the resource's end.
+type HTTPRange = exported.HTTPRange
+
+// ShareFileRangeList - The list of file ranges.
+type ShareFileRangeList = generated.ShareFileRangeList
+
+// ClearRange - Ranges that were cleared.
+type ClearRange = generated.ClearRange
+
+// ShareFileRange - An Azure Storage file range.
+type ShareFileRange = generated.FileRange
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// CreateOptions contains the optional parameters for the Client.Create method.
+type CreateOptions struct {
+	// The default value is 'None' for Attributes and 'now' for CreationTime and LastWriteTime fields in file.SMBProperties.
+	SMBProperties *SMBProperties
+	// The default value is 'inherit' for Permission field in file.Permissions.
+	Permissions *Permissions
+	HTTPHeaders *HTTPHeaders
+	LeaseAccessConditions *LeaseAccessConditions
+	// A name-value pair to associate with a file storage object.
+ Metadata map[string]*string +} + +func (o *CreateOptions) format() (fileAttributes string, fileCreationTime string, fileLastWriteTime string, + createOptions *generated.FileClientCreateOptions, fileHTTPHeaders *generated.ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) { + if o == nil { + return shared.FileAttributesNone, shared.DefaultCurrentTimeString, shared.DefaultCurrentTimeString, &generated.FileClientCreateOptions{ + FilePermission: to.Ptr(shared.DefaultFilePermissionString), + }, nil, nil + } + + fileAttributes, fileCreationTime, fileLastWriteTime = o.SMBProperties.Format(false, shared.FileAttributesNone, shared.DefaultCurrentTimeString) + + permission, permissionKey := o.Permissions.Format(shared.DefaultFilePermissionString) + + createOptions = &generated.FileClientCreateOptions{ + FilePermission: permission, + FilePermissionKey: permissionKey, + Metadata: o.Metadata, + } + + fileHTTPHeaders = o.HTTPHeaders + leaseAccessConditions = o.LeaseAccessConditions + + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *DeleteOptions) format() (*generated.FileClientDeleteOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // ShareSnapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query for the file properties. 
+ ShareSnapshot *string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetPropertiesOptions) format() (*generated.FileClientGetPropertiesOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.FileClientGetPropertiesOptions{ + Sharesnapshot: o.ShareSnapshot, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method. +type SetHTTPHeadersOptions struct { + // Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges + // above the specified byte value are cleared. + FileContentLength *int64 + // The default value is 'preserve' for Attributes, CreationTime and LastWriteTime fields in file.SMBProperties. + SMBProperties *SMBProperties + // The default value is 'preserve' for Permission field in file.Permissions. + Permissions *Permissions + HTTPHeaders *HTTPHeaders + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetHTTPHeadersOptions) format() (fileAttributes string, fileCreationTime string, fileLastWriteTime string, + opts *generated.FileClientSetHTTPHeadersOptions, fileHTTPHeaders *generated.ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) { + if o == nil { + return shared.DefaultPreserveString, shared.DefaultPreserveString, shared.DefaultPreserveString, &generated.FileClientSetHTTPHeadersOptions{ + FilePermission: to.Ptr(shared.DefaultPreserveString), + }, nil, nil + } + + fileAttributes, fileCreationTime, fileLastWriteTime = o.SMBProperties.Format(false, shared.DefaultPreserveString, shared.DefaultPreserveString) + + permission, permissionKey := o.Permissions.Format(shared.DefaultPreserveString) + + opts = &generated.FileClientSetHTTPHeadersOptions{ + FileContentLength: o.FileContentLength, + FilePermission: permission, + FilePermissionKey: permissionKey, + } + + fileHTTPHeaders = o.HTTPHeaders + leaseAccessConditions = o.LeaseAccessConditions + + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. +type SetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetMetadataOptions) format() (*generated.FileClientSetMetadataOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return &generated.FileClientSetMetadataOptions{ + Metadata: o.Metadata, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method. +type StartCopyFromURLOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // required if x-ms-file-permission-copy-mode is specified as override + Permissions *Permissions + CopyFileSMBInfo *CopyFileSMBInfo + // LeaseAccessConditions contains optional parameters to access leased entity. + // Required if the destination file has an active lease. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *StartCopyFromURLOptions) format() (*generated.FileClientStartCopyOptions, *generated.CopyFileSMBInfo, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + var permission, permissionKey *string + if o.Permissions != nil { + permission = o.Permissions.Permission + permissionKey = o.Permissions.PermissionKey + } + + opts := &generated.FileClientStartCopyOptions{ + FilePermission: permission, + FilePermissionKey: permissionKey, + Metadata: o.Metadata, + } + return opts, o.CopyFileSMBInfo.format(), o.LeaseAccessConditions +} + +// CopyFileSMBInfo contains a group of parameters for the FileClient.StartCopy method. +type CopyFileSMBInfo struct { + // Specifies either the option to copy file attributes from a source file(source) to a target file or a list of attributes + // to set on a target file. 
+ Attributes CopyFileAttributes + // Specifies either the option to copy file creation time from a source file(source) to a target file or a time value in ISO + // 8601 format to set as creation time on a target file. + CreationTime CopyFileCreationTime + // Specifies either the option to copy file last write time from a source file(source) to a target file or a time value in + // ISO 8601 format to set as last write time on a target file. + LastWriteTime CopyFileLastWriteTime + // Specifies the option to copy file security descriptor from source file or to set it using the value which is defined by + // the header value of x-ms-file-permission or x-ms-file-permission-key. + PermissionCopyMode *PermissionCopyModeType + // Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + IgnoreReadOnly *bool + // Specifies the option to set archive attribute on a target file. True means archive attribute will be set on a target file + // despite attribute overrides or a source file state. + SetArchiveAttribute *bool +} + +func (c *CopyFileSMBInfo) format() *generated.CopyFileSMBInfo { + if c == nil { + return nil + } + + opts := &generated.CopyFileSMBInfo{ + FilePermissionCopyMode: c.PermissionCopyMode, + IgnoreReadOnly: c.IgnoreReadOnly, + SetArchiveAttribute: c.SetArchiveAttribute, + } + + if c.Attributes != nil { + opts.FileAttributes = c.Attributes.FormatAttributes() + } + if c.CreationTime != nil { + opts.FileCreationTime = c.CreationTime.FormatCreationTime() + } + if c.LastWriteTime != nil { + opts.FileLastWriteTime = c.LastWriteTime.FormatLastWriteTime() + } + + return opts +} + +// CopyFileAttributes specifies either the option to copy file attributes from a source file(source) to a target file or +// a list of attributes to set on a target file. 
+type CopyFileAttributes = exported.CopyFileAttributes + +// SourceCopyFileAttributes specifies to copy file attributes from a source file(source) to a target file +type SourceCopyFileAttributes = exported.SourceCopyFileAttributes + +// DestinationCopyFileAttributes specifies a list of attributes to set on a target file. +type DestinationCopyFileAttributes = exported.DestinationCopyFileAttributes + +// CopyFileCreationTime specifies either the option to copy file creation time from a source file(source) to a target file or +// a time value in ISO 8601 format to set as creation time on a target file. +type CopyFileCreationTime = exported.CopyFileCreationTime + +// SourceCopyFileCreationTime specifies to copy file creation time from a source file(source) to a target file. +type SourceCopyFileCreationTime = exported.SourceCopyFileCreationTime + +// DestinationCopyFileCreationTime specifies a time value in ISO 8601 format to set as creation time on a target file. +type DestinationCopyFileCreationTime = exported.DestinationCopyFileCreationTime + +// CopyFileLastWriteTime specifies either the option to copy file last write time from a source file(source) to a target file or +// a time value in ISO 8601 format to set as last write time on a target file. +type CopyFileLastWriteTime = exported.CopyFileLastWriteTime + +// SourceCopyFileLastWriteTime specifies to copy file last write time from a source file(source) to a target file. +type SourceCopyFileLastWriteTime = exported.SourceCopyFileLastWriteTime + +// DestinationCopyFileLastWriteTime specifies a time value in ISO 8601 format to set as last write time on a target file. +type DestinationCopyFileLastWriteTime = exported.DestinationCopyFileLastWriteTime + +// --------------------------------------------------------------------------------------------------------------------- + +// AbortCopyOptions contains the optional parameters for the Client.AbortCopy method. 
+type AbortCopyOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. + // Required if the destination file has an active lease. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *AbortCopyOptions) format() (*generated.FileClientAbortCopyOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method. +type DownloadStreamOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + // When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the + // range, as long as the range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // LeaseAccessConditions contains optional parameters to access leased entity. + // If specified, the operation is performed only if the file's lease is currently active and + // the lease ID that's specified in the request matches the lease ID of the file. + // Otherwise, the operation fails with status code 412 (Precondition Failed). + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *DownloadStreamOptions) format() (*generated.FileClientDownloadOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return &generated.FileClientDownloadOptions{ + Range: exported.FormatHTTPRange(o.Range), + RangeGetContentMD5: o.RangeGetContentMD5, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// downloadOptions contains common options used by the Client.DownloadBuffer and Client.DownloadFile methods. 
+type downloadOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB. + ChunkSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions + + // Concurrency indicates the maximum number of chunks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerChunk is used when downloading each chunk. + RetryReaderOptionsPerChunk RetryReaderOptions +} + +func (o *downloadOptions) getFilePropertiesOptions() *GetPropertiesOptions { + if o == nil { + return nil + } + return &GetPropertiesOptions{ + LeaseAccessConditions: o.LeaseAccessConditions, + } +} + +func (o *downloadOptions) getDownloadFileOptions(rng HTTPRange) *DownloadStreamOptions { + downloadFileOptions := &DownloadStreamOptions{ + Range: rng, + } + if o != nil { + downloadFileOptions.LeaseAccessConditions = o.LeaseAccessConditions + } + return downloadFileOptions +} + +// DownloadBufferOptions contains the optional parameters for the Client.DownloadBuffer method. +type DownloadBufferOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB. + ChunkSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions + + // Concurrency indicates the maximum number of chunks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerChunk is used when downloading each chunk. 
+ RetryReaderOptionsPerChunk RetryReaderOptions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DownloadFileOptions contains the optional parameters for the Client.DownloadFile method. +type DownloadFileOptions struct { + // Range specifies a range of bytes. The default value is all bytes. + Range HTTPRange + + // ChunkSize specifies the chunk size to use for each parallel download; the default size is 4MB. + ChunkSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions + + // Concurrency indicates the maximum number of chunks to download in parallel (0=default). + Concurrency uint16 + + // RetryReaderOptionsPerChunk is used when downloading each chunk. + RetryReaderOptionsPerChunk RetryReaderOptions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ResizeOptions contains the optional parameters for the Client.Resize method. +type ResizeOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *ResizeOptions) format(contentLength int64) (fileAttributes string, fileCreationTime string, fileLastWriteTime string, + opts *generated.FileClientSetHTTPHeadersOptions, leaseAccessConditions *LeaseAccessConditions) { + fileAttributes, fileCreationTime, fileLastWriteTime = shared.DefaultPreserveString, shared.DefaultPreserveString, shared.DefaultPreserveString + + opts = &generated.FileClientSetHTTPHeadersOptions{ + FileContentLength: &contentLength, + FilePermission: to.Ptr(shared.DefaultPreserveString), + } + + if o != nil { + leaseAccessConditions = o.LeaseAccessConditions + } + + return +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadRangeOptions contains the optional parameters for the Client.UploadRange method. +type UploadRangeOptions struct { + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation TransferValidationType + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *UploadRangeOptions) format(offset int64, body io.ReadSeekCloser) (string, int64, *generated.FileClientUploadRangeOptions, *generated.LeaseAccessConditions, error) { + if offset < 0 || body == nil { + return "", 0, nil, nil, errors.New("invalid argument: offset must be >= 0 and body must not be nil") + } + + count, err := shared.ValidateSeekableStreamAt0AndGetCount(body) + if err != nil { + return "", 0, nil, nil, err + } + + if count == 0 { + return "", 0, nil, nil, errors.New("invalid argument: body must contain readable data whose size is > 0") + } + + httpRange := exported.FormatHTTPRange(HTTPRange{ + Offset: offset, + Count: count, + }) + rangeParam := "" + if httpRange != nil { + rangeParam = *httpRange + } + + var leaseAccessConditions *LeaseAccessConditions + uploadRangeOptions := &generated.FileClientUploadRangeOptions{} + + if o != nil { + leaseAccessConditions = o.LeaseAccessConditions + } + if o != nil && o.TransactionalValidation != nil { + _, err = o.TransactionalValidation.Apply(body, uploadRangeOptions) + if err != nil { + return "", 0, nil, nil, err + } + } + + return rangeParam, count, uploadRangeOptions, leaseAccessConditions, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ClearRangeOptions contains the optional parameters for the Client.ClearRange method. +type ClearRangeOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *ClearRangeOptions) format(contentRange HTTPRange) (string, *generated.LeaseAccessConditions, error) { + httpRange := exported.FormatHTTPRange(contentRange) + if httpRange == nil || contentRange.Offset < 0 || contentRange.Count <= 0 { + return "", nil, errors.New("invalid argument: either offset is < 0 or count <= 0") + } + + if o == nil { + return *httpRange, nil, nil + } + + return *httpRange, o.LeaseAccessConditions, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadRangeFromURLOptions contains the optional parameters for the Client.UploadRangeFromURL method. +type UploadRangeFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 uint64 + SourceModifiedAccessConditions *SourceModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *UploadRangeFromURLOptions) format(sourceOffset int64, destinationOffset int64, count int64) (string, *generated.FileClientUploadRangeFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.LeaseAccessConditions, error) { + if sourceOffset < 0 || destinationOffset < 0 { + return "", nil, nil, nil, errors.New("invalid argument: source and destination offsets must be >= 0") + } + + httpRangeSrc := exported.FormatHTTPRange(HTTPRange{Offset: sourceOffset, Count: count}) + httpRangeDest := exported.FormatHTTPRange(HTTPRange{Offset: destinationOffset, Count: count}) + destRange := "" + if httpRangeDest != nil { + destRange = *httpRangeDest + } + + opts := &generated.FileClientUploadRangeFromURLOptions{ + SourceRange: httpRangeSrc, + } + + var sourceModifiedAccessConditions *SourceModifiedAccessConditions + var 
leaseAccessConditions *LeaseAccessConditions + + if o != nil { + opts.CopySourceAuthorization = o.CopySourceAuthorization + sourceModifiedAccessConditions = o.SourceModifiedAccessConditions + leaseAccessConditions = o.LeaseAccessConditions + + buf := make([]byte, 8) + binary.LittleEndian.PutUint64(buf, o.SourceContentCRC64) + opts.SourceContentCRC64 = buf + } + + return destRange, opts, sourceModifiedAccessConditions, leaseAccessConditions, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetRangeListOptions contains the optional parameters for the Client.GetRangeList method. +type GetRangeListOptions struct { + // The previous snapshot parameter is an opaque DateTime value that, when present, specifies the previous snapshot. + PrevShareSnapshot *string + // Specifies the range of bytes over which to list ranges, inclusively. + Range HTTPRange + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetRangeListOptions) format() (*generated.FileClientGetRangeListOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.FileClientGetRangeListOptions{ + Prevsharesnapshot: o.PrevShareSnapshot, + Range: exported.FormatHTTPRange(o.Range), + Sharesnapshot: o.ShareSnapshot, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. 
+type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ForceCloseHandlesOptions contains the optional parameters for the Client.ForceCloseHandles method. +type ForceCloseHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ForceCloseHandlesOptions) format() *generated.FileClientForceCloseHandlesOptions { + if o == nil { + return nil + } + + return &generated.FileClientForceCloseHandlesOptions{ + Marker: o.Marker, + Sharesnapshot: o.ShareSnapshot, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListHandlesOptions contains the optional parameters for the Client.ListHandles method. +type ListHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. 
If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + MaxResults *int32 + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ListHandlesOptions) format() *generated.FileClientListHandlesOptions { + if o == nil { + return nil + } + + return &generated.FileClientListHandlesOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + Sharesnapshot: o.ShareSnapshot, + } +} + +// Handle - A listed Azure Storage handle item. +type Handle = generated.Handle + +// --------------------------------------------------------------------------------------------------------------------- + +// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions. +type uploadFromReaderOptions struct { + // ChunkSize specifies the chunk size to use in bytes; the default (and maximum size) is MaxUpdateRangeBytes. + ChunkSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the FileClient. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress func(bytesTransferred int64) + + // Concurrency indicates the maximum number of chunks to upload in parallel (default is 5) + Concurrency uint16 + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +// UploadBufferOptions provides set of configurations for Client.UploadBuffer operation. +type UploadBufferOptions = uploadFromReaderOptions + +// UploadFileOptions provides set of configurations for Client.UploadFile operation. 
+type UploadFileOptions = uploadFromReaderOptions + +func (o *uploadFromReaderOptions) getUploadRangeOptions() *UploadRangeOptions { + return &UploadRangeOptions{ + LeaseAccessConditions: o.LeaseAccessConditions, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadStreamOptions provides set of configurations for Client.UploadStream operation. +type UploadStreamOptions struct { + // ChunkSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB. + // Maximum size of a chunk is MaxUpdateRangeBytes. + ChunkSize int64 + + // Concurrency defines the max number of concurrent uploads to be performed to upload the file. + // Each concurrent upload will create a buffer of size ChunkSize. The default value is one. + Concurrency int + + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (u *UploadStreamOptions) setDefaults() { + if u.Concurrency == 0 { + u.Concurrency = 1 + } + + if u.ChunkSize < _1MiB { + u.ChunkSize = _1MiB + } +} + +func (u *UploadStreamOptions) getUploadRangeOptions() *UploadRangeOptions { + return &UploadRangeOptions{ + LeaseAccessConditions: u.LeaseAccessConditions, + } +} + +// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// NOTE: Changing any SAS-related field requires computing a new SAS signature. +type URLParts = sas.URLParts + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object. 
+func ParseURL(u string) (URLParts, error) { + return sas.ParseURL(u) +} diff --git a/sdk/storage/azfile/file/responses.go b/sdk/storage/azfile/file/responses.go new file mode 100644 index 000000000000..e47d87741861 --- /dev/null +++ b/sdk/storage/azfile/file/responses.go @@ -0,0 +1,93 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "io" +) + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.FileClientCreateResponse + +// DeleteResponse contains the response from method Client.Delete. +type DeleteResponse = generated.FileClientDeleteResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.FileClientGetPropertiesResponse + +// SetMetadataResponse contains the response from method Client.SetMetadata. +type SetMetadataResponse = generated.FileClientSetMetadataResponse + +// SetHTTPHeadersResponse contains the response from method Client.SetHTTPHeaders. +type SetHTTPHeadersResponse = generated.FileClientSetHTTPHeadersResponse + +// StartCopyFromURLResponse contains the response from method Client.StartCopyFromURL. +type StartCopyFromURLResponse = generated.FileClientStartCopyResponse + +// AbortCopyResponse contains the response from method Client.AbortCopy. +type AbortCopyResponse = generated.FileClientAbortCopyResponse + +// DownloadResponse contains the response from method FileClient.Download. +type DownloadResponse = generated.FileClientDownloadResponse + +// DownloadStreamResponse contains the response from method Client.DownloadStream. +// To read from the stream, read from the Body field, or call the NewRetryReader method. 
+type DownloadStreamResponse struct { + DownloadResponse + + client *Client + getInfo httpGetterInfo + leaseAccessConditions *LeaseAccessConditions +} + +// NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while +// reading, it will make additional requests to reestablish a connection and continue reading. +// Pass nil for options to accept the default options. +// Callers of this method should not access the DownloadStreamResponse.Body field. +func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader { + if options == nil { + options = &RetryReaderOptions{} + } + + return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) { + options := DownloadStreamOptions{ + Range: getInfo.Range, + LeaseAccessConditions: r.leaseAccessConditions, + } + resp, err := r.client.DownloadStream(ctx, &options) + if err != nil { + return nil, err + } + return resp.Body, err + }, *options) +} + +// ResizeResponse contains the response from method Client.Resize. +type ResizeResponse = generated.FileClientSetHTTPHeadersResponse + +// UploadRangeResponse contains the response from method Client.UploadRange. +type UploadRangeResponse = generated.FileClientUploadRangeResponse + +// ClearRangeResponse contains the response from method Client.ClearRange. +type ClearRangeResponse = generated.FileClientUploadRangeResponse + +// UploadRangeFromURLResponse contains the response from method Client.UploadRangeFromURL. +type UploadRangeFromURLResponse = generated.FileClientUploadRangeFromURLResponse + +// GetRangeListResponse contains the response from method Client.GetRangeList. +type GetRangeListResponse = generated.FileClientGetRangeListResponse + +// ForceCloseHandlesResponse contains the response from method Client.ForceCloseHandles. 
+type ForceCloseHandlesResponse = generated.FileClientForceCloseHandlesResponse + +// ListHandlesResponse contains the response from method Client.ListHandles. +type ListHandlesResponse = generated.FileClientListHandlesResponse + +// ListHandlesSegmentResponse - An enumeration of handles. +type ListHandlesSegmentResponse = generated.ListHandlesResponse diff --git a/sdk/storage/azfile/file/retry_reader.go b/sdk/storage/azfile/file/retry_reader.go new file mode 100644 index 000000000000..2e76a91f3169 --- /dev/null +++ b/sdk/storage/azfile/file/retry_reader.go @@ -0,0 +1,186 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package file + +import ( + "context" + "io" + "net" + "strings" + "sync" +) + +// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. +type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error) + +// httpGetterInfo is passed to an HTTPGetter function passing it parameters +// that should be used to make an HTTP GET request. +type httpGetterInfo struct { + Range HTTPRange +} + +// RetryReaderOptions configures the retry reader's behavior. +// Zero-value fields will have their specified default values applied during use. +// This allows for modification of a subset of fields. +type RetryReaderOptions struct { + // MaxRetries specifies the maximum number of attempts a failed read will be retried + // before producing an error. + // The default value is three. + MaxRetries int32 + + // OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging. + OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool) + + // EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". 
By default, + // retryReader has the following special behaviour: closing the response body before it is all read is treated as a + // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the + // read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If + // EarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead + // treated as a fatal (non-retryable) error. + // Note that setting EarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens + // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors + // which will be retried. + // The default value is false. + EarlyCloseAsError bool + + doInjectError bool + doInjectErrorRound int32 + injectedError error +} + +// RetryReader attempts to read from response, and if there is a retry-able network error +// returned during reading, it will retry according to retry reader option through executing +// user defined action with provided data to get a new response, and continue the overall reading process +// through reading from the new response. +// RetryReader implements the io.ReadCloser interface. +type RetryReader struct { + ctx context.Context + info httpGetterInfo + retryReaderOptions RetryReaderOptions + getter httpGetter + countWasBounded bool + + // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response + responseMu *sync.Mutex + response io.ReadCloser +} + +// newRetryReader creates a retry reader. 
+func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader { + if o.MaxRetries < 1 { + o.MaxRetries = 3 + } + return &RetryReader{ + ctx: ctx, + getter: getter, + info: info, + countWasBounded: info.Range.Count != CountToEnd, + response: initialResponse, + responseMu: &sync.Mutex{}, + retryReaderOptions: o, + } +} + +// setResponse function +func (s *RetryReader) setResponse(r io.ReadCloser) { + s.responseMu.Lock() + defer s.responseMu.Unlock() + s.response = r +} + +// Read from retry reader +func (s *RetryReader) Read(p []byte) (n int, err error) { + for try := int32(0); ; try++ { + //fmt.Println(try) // Comment out for debugging. + if s.countWasBounded && s.info.Range.Count == CountToEnd { + // User specified an original count and the remaining bytes are 0, return 0, EOF + return 0, io.EOF + } + + s.responseMu.Lock() + resp := s.response + s.responseMu.Unlock() + if resp == nil { // We don't have a response stream to read from, try to get one. + newResponse, err := s.getter(s.ctx, s.info) + if err != nil { + return 0, err + } + // Successful GET; this is the network stream we'll read from. + s.setResponse(newResponse) + resp = newResponse + } + n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) + + // Injection mechanism for testing. + if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound { + if s.retryReaderOptions.injectedError != nil { + err = s.retryReaderOptions.injectedError + } else { + err = &net.DNSError{IsTemporary: true} + } + } + + // We successfully read data or end EOF. 
+ if err == nil || err == io.EOF { + s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + if s.info.Range.Count != CountToEnd { + s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + } + return n, err // Return the result to the caller + } + _ = s.Close() + + s.setResponse(nil) // Our stream is no longer good + + // Check the retry count and error code, and decide whether to retry. + retriesExhausted := try >= s.retryReaderOptions.MaxRetries + _, isNetError := err.(net.Error) + isUnexpectedEOF := err == io.ErrUnexpectedEOF + willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted + + // Notify, for logging purposes, of any failures + if s.retryReaderOptions.OnFailedRead != nil { + failureCount := try + 1 // because try is zero-based + s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry) + } + + if willRetry { + continue + // Loop around and try to get and read from new stream. + } + return n, err // Not retryable, or retries exhausted, so just return + } +} + +// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry +// Is this safe, to close early from another goroutine? Early close ultimately ends up calling +// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors" +// which is exactly the behaviour we want. +// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read) +// then there are two different types of error that may happen - either the one we check for here, +// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine +// to check for one, since the other is a net.Error, which our main Read retry loop is already handling. 
+func (s *RetryReader) wasRetryableEarlyClose(err error) bool { + if s.retryReaderOptions.EarlyCloseAsError { + return false // user wants all early closes to be errors, and so not retryable + } + // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text + return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) +} + +// ReadOnClosedBodyMessage of retry reader +const ReadOnClosedBodyMessage = "read on closed response body" + +// Close retry reader +func (s *RetryReader) Close() error { + s.responseMu.Lock() + defer s.responseMu.Unlock() + if s.response != nil { + return s.response.Close() + } + return nil +} diff --git a/sdk/storage/azfile/fileerror/error_codes.go b/sdk/storage/azfile/fileerror/error_codes.go new file mode 100644 index 000000000000..c897c0953828 --- /dev/null +++ b/sdk/storage/azfile/fileerror/error_codes.go @@ -0,0 +1,107 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package fileerror + +import ( + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" +) + +// HasCode returns true if the provided error is an *azcore.ResponseError +// with its ErrorCode field equal to one of the specified Codes. 
+func HasCode(err error, codes ...Code) bool { + var respErr *azcore.ResponseError + if !errors.As(err, &respErr) { + return false + } + + for _, code := range codes { + if respErr.ErrorCode == string(code) { + return true + } + } + + return false +} + +// Code - Error codes returned by the service +type Code = generated.StorageErrorCode + +const ( + AccountAlreadyExists Code = "AccountAlreadyExists" + AccountBeingCreated Code = "AccountBeingCreated" + AccountIsDisabled Code = "AccountIsDisabled" + AuthenticationFailed Code = "AuthenticationFailed" + AuthorizationFailure Code = "AuthorizationFailure" + AuthorizationPermissionMismatch Code = "AuthorizationPermissionMismatch" + AuthorizationProtocolMismatch Code = "AuthorizationProtocolMismatch" + AuthorizationResourceTypeMismatch Code = "AuthorizationResourceTypeMismatch" + AuthorizationServiceMismatch Code = "AuthorizationServiceMismatch" + AuthorizationSourceIPMismatch Code = "AuthorizationSourceIPMismatch" + CannotDeleteFileOrDirectory Code = "CannotDeleteFileOrDirectory" + ClientCacheFlushDelay Code = "ClientCacheFlushDelay" + ConditionHeadersNotSupported Code = "ConditionHeadersNotSupported" + ConditionNotMet Code = "ConditionNotMet" + DeletePending Code = "DeletePending" + DirectoryNotEmpty Code = "DirectoryNotEmpty" + EmptyMetadataKey Code = "EmptyMetadataKey" + FeatureVersionMismatch Code = "FeatureVersionMismatch" + FileLockConflict Code = "FileLockConflict" + InsufficientAccountPermissions Code = "InsufficientAccountPermissions" + InternalError Code = "InternalError" + InvalidAuthenticationInfo Code = "InvalidAuthenticationInfo" + InvalidFileOrDirectoryPathName Code = "InvalidFileOrDirectoryPathName" + InvalidHTTPVerb Code = "InvalidHttpVerb" + InvalidHeaderValue Code = "InvalidHeaderValue" + InvalidInput Code = "InvalidInput" + InvalidMD5 Code = "InvalidMd5" + InvalidMetadata Code = "InvalidMetadata" + InvalidQueryParameterValue Code = "InvalidQueryParameterValue" + InvalidRange Code = "InvalidRange" + 
InvalidResourceName Code = "InvalidResourceName" + InvalidURI Code = "InvalidUri" + InvalidXMLDocument Code = "InvalidXmlDocument" + InvalidXMLNodeValue Code = "InvalidXmlNodeValue" + MD5Mismatch Code = "Md5Mismatch" + MetadataTooLarge Code = "MetadataTooLarge" + MissingContentLengthHeader Code = "MissingContentLengthHeader" + MissingRequiredHeader Code = "MissingRequiredHeader" + MissingRequiredQueryParameter Code = "MissingRequiredQueryParameter" + MissingRequiredXMLNode Code = "MissingRequiredXmlNode" + MultipleConditionHeadersNotSupported Code = "MultipleConditionHeadersNotSupported" + OperationTimedOut Code = "OperationTimedOut" + OutOfRangeInput Code = "OutOfRangeInput" + OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" + ParentNotFound Code = "ParentNotFound" + ReadOnlyAttribute Code = "ReadOnlyAttribute" + RequestBodyTooLarge Code = "RequestBodyTooLarge" + RequestURLFailedToParse Code = "RequestUrlFailedToParse" + ResourceAlreadyExists Code = "ResourceAlreadyExists" + ResourceNotFound Code = "ResourceNotFound" + ResourceTypeMismatch Code = "ResourceTypeMismatch" + ServerBusy Code = "ServerBusy" + ShareAlreadyExists Code = "ShareAlreadyExists" + ShareBeingDeleted Code = "ShareBeingDeleted" + ShareDisabled Code = "ShareDisabled" + ShareHasSnapshots Code = "ShareHasSnapshots" + ShareNotFound Code = "ShareNotFound" + ShareSnapshotCountExceeded Code = "ShareSnapshotCountExceeded" + ShareSnapshotInProgress Code = "ShareSnapshotInProgress" + ShareSnapshotOperationNotSupported Code = "ShareSnapshotOperationNotSupported" + SharingViolation Code = "SharingViolation" + UnsupportedHTTPVerb Code = "UnsupportedHttpVerb" + UnsupportedHeader Code = "UnsupportedHeader" + UnsupportedQueryParameter Code = "UnsupportedQueryParameter" + UnsupportedXMLNode Code = "UnsupportedXmlNode" +) + +var ( + // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. 
+ MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") +) diff --git a/sdk/storage/azfile/go.mod b/sdk/storage/azfile/go.mod new file mode 100644 index 000000000000..cbd96fa64efc --- /dev/null +++ b/sdk/storage/azfile/go.mod @@ -0,0 +1,28 @@ +module github.com/Azure/azure-sdk-for-go/sdk/storage/azfile + +go 1.18 + +require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 + github.com/stretchr/testify v1.7.1 +) + +require ( + github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dnaeon/go-vcr v1.1.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/crypto v0.7.0 // indirect + golang.org/x/net v0.8.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/sdk/storage/azfile/go.sum b/sdk/storage/azfile/go.sum new file mode 100644 index 000000000000..8f03fb9639d6 --- /dev/null +++ b/sdk/storage/azfile/go.sum @@ -0,0 +1,46 @@ +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= 
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= +github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= +github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod 
h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/storage/azfile/internal/base/clients.go b/sdk/storage/azfile/internal/base/clients.go new file mode 100644 index 000000000000..93317d4dc29b --- /dev/null +++ b/sdk/storage/azfile/internal/base/clients.go @@ -0,0 +1,60 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package base + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" +) + +// ClientOptions contains the optional parameters when creating a Client. 
+type ClientOptions struct { + azcore.ClientOptions +} + +type Client[T any] struct { + inner *T + sharedKey *exported.SharedKeyCredential +} + +func InnerClient[T any](client *Client[T]) *T { + return client.inner +} + +func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { + return client.sharedKey +} + +func NewServiceClient(serviceURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] { + return &Client[generated.ServiceClient]{ + inner: generated.NewServiceClient(serviceURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewShareClient(shareURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ShareClient] { + return &Client[generated.ShareClient]{ + inner: generated.NewShareClient(shareURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewDirectoryClient(directoryURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.DirectoryClient] { + return &Client[generated.DirectoryClient]{ + inner: generated.NewDirectoryClient(directoryURL, pipeline), + sharedKey: sharedKey, + } +} + +func NewFileClient(fileURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.FileClient] { + return &Client[generated.FileClient]{ + inner: generated.NewFileClient(fileURL, pipeline), + sharedKey: sharedKey, + } +} diff --git a/sdk/storage/azfile/internal/exported/access_policy.go b/sdk/storage/azfile/internal/exported/access_policy.go new file mode 100644 index 000000000000..d9c95db2821a --- /dev/null +++ b/sdk/storage/azfile/internal/exported/access_policy.go @@ -0,0 +1,62 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +import ( + "bytes" + "fmt" +) + +// The AccessPolicyPermission type simplifies creating the permissions string for a share's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission struct { + Read, Create, Write, Delete, List bool +} + +// String produces the access policy permission string for an Azure Storage share. +// Call this method to set AccessPolicy's Permission field. +func (p AccessPolicyPermission) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the AccessPolicyPermission's fields from a string. +func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/sdk/storage/azfile/internal/exported/copy_file_smb_options.go b/sdk/storage/azfile/internal/exported/copy_file_smb_options.go new file mode 100644 index 000000000000..9f0da40bba4d --- /dev/null +++ b/sdk/storage/azfile/internal/exported/copy_file_smb_options.go @@ -0,0 +1,96 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "time" +) + +// CopyFileCreationTime specifies either the option to copy file creation time from a source file(source) to a target file or +// a time value in ISO 8601 format to set as creation time on a target file. +type CopyFileCreationTime interface { + FormatCreationTime() *string + notPubliclyImplementable() +} + +// SourceCopyFileCreationTime specifies to copy file creation time from a source file(source) to a target file. +type SourceCopyFileCreationTime struct { +} + +func (s SourceCopyFileCreationTime) FormatCreationTime() *string { + return to.Ptr("source") +} + +func (s SourceCopyFileCreationTime) notPubliclyImplementable() {} + +// DestinationCopyFileCreationTime specifies a time value in ISO 8601 format to set as creation time on a target file. +type DestinationCopyFileCreationTime time.Time + +func (d DestinationCopyFileCreationTime) FormatCreationTime() *string { + return to.Ptr(time.Time(d).UTC().Format(generated.ISO8601)) +} + +func (d DestinationCopyFileCreationTime) notPubliclyImplementable() {} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyFileLastWriteTime specifies either the option to copy file last write time from a source file(source) to a target file or +// a time value in ISO 8601 format to set as last write time on a target file. +type CopyFileLastWriteTime interface { + FormatLastWriteTime() *string + notPubliclyImplementable() +} + +// SourceCopyFileLastWriteTime specifies to copy file last write time from a source file(source) to a target file. 
+type SourceCopyFileLastWriteTime struct { +} + +func (s SourceCopyFileLastWriteTime) FormatLastWriteTime() *string { + return to.Ptr("source") +} + +func (s SourceCopyFileLastWriteTime) notPubliclyImplementable() {} + +// DestinationCopyFileLastWriteTime specifies a time value in ISO 8601 format to set as last write time on a target file. +type DestinationCopyFileLastWriteTime time.Time + +func (d DestinationCopyFileLastWriteTime) FormatLastWriteTime() *string { + return to.Ptr(time.Time(d).UTC().Format(generated.ISO8601)) +} + +func (d DestinationCopyFileLastWriteTime) notPubliclyImplementable() {} + +// --------------------------------------------------------------------------------------------------------------------- + +// CopyFileAttributes specifies either the option to copy file attributes from a source file(source) to a target file or +// a list of attributes to set on a target file. +type CopyFileAttributes interface { + FormatAttributes() *string + notPubliclyImplementable() +} + +// SourceCopyFileAttributes specifies to copy file attributes from a source file(source) to a target file +type SourceCopyFileAttributes struct { +} + +func (s SourceCopyFileAttributes) FormatAttributes() *string { + return to.Ptr("source") +} + +func (s SourceCopyFileAttributes) notPubliclyImplementable() {} + +// DestinationCopyFileAttributes specifies a list of attributes to set on a target file. 
+type DestinationCopyFileAttributes NTFSFileAttributes + +func (d DestinationCopyFileAttributes) FormatAttributes() *string { + attributes := NTFSFileAttributes(d) + return to.Ptr(attributes.String()) +} + +func (d DestinationCopyFileAttributes) notPubliclyImplementable() {} diff --git a/sdk/storage/azfile/internal/exported/exported.go b/sdk/storage/azfile/internal/exported/exported.go new file mode 100644 index 000000000000..9bc1ca47df84 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/exported.go @@ -0,0 +1,33 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "fmt" + "strconv" +) + +// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and +// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange +// which has an offset but no zero value count indicates from the offset to the resource's end. +type HTTPRange struct { + Offset int64 + Count int64 +} + +// FormatHTTPRange converts an HTTPRange to its string format. +func FormatHTTPRange(r HTTPRange) *string { + if r.Offset == 0 && r.Count == 0 { + return nil // No specified range + } + endOffset := "" // if count == CountToEnd (0) + if r.Count > 0 { + endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10) + } + dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset) + return &dataRange +} diff --git a/sdk/storage/azfile/internal/exported/file_permissions.go b/sdk/storage/azfile/internal/exported/file_permissions.go new file mode 100644 index 000000000000..73fce6afb27c --- /dev/null +++ b/sdk/storage/azfile/internal/exported/file_permissions.go @@ -0,0 +1,32 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +// Permissions contains the optional parameters for the permissions on the file. +type Permissions struct { + // If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + Permission *string + // Key of the permission to be set for the directory/file. + // Note: Only one of the x-ms-file-permission or x-ms-file-permission-key should be specified. + PermissionKey *string +} + +// Format returns file permission string and permission key. +func (p *Permissions) Format(defaultFilePermissionStr string) (*string, *string) { + if p == nil { + return &defaultFilePermissionStr, nil + } + + if p.Permission == nil && p.PermissionKey == nil { + return &defaultFilePermissionStr, nil + } else { + return p.Permission, p.PermissionKey + } +} diff --git a/sdk/storage/azfile/internal/exported/log_events.go b/sdk/storage/azfile/internal/exported/log_events.go new file mode 100644 index 000000000000..d33528ea8eb2 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/log_events.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// NOTE: these are publicly exported via type-aliasing in azfile/log.go +const ( + // EventUpload is used when we compute number of chunks to upload and size of each chunk. 
+ EventUpload log.Event = "azfile.Upload" +) diff --git a/sdk/storage/azfile/internal/exported/shared_key_credential.go b/sdk/storage/azfile/internal/exported/shared_key_credential.go new file mode 100644 index 000000000000..439617d07ba1 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/shared_key_credential.go @@ -0,0 +1,218 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "sync/atomic" + "time" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential struct { + // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only + accountName string + accountKey atomic.Value // []byte +} + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) { + c := SharedKeyCredential{accountName: accountName} + if err := c.SetAccountKey(accountKey); err != nil { + return nil, err + } + return &c, nil +} + +// AccountName returns the Storage account's name. +func (c *SharedKeyCredential) AccountName() string { + return c.accountName +} + +// SetAccountKey replaces the existing account key with the specified account key. 
+func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { + _bytes, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return fmt.Errorf("decode account key: %w", err) + } + c.accountKey.Store(_bytes) + return nil +} + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) { + h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := req.Header + contentLength := getHeader(shared.HeaderContentLength, headers) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + req.Method, + getHeader(shared.HeaderContentEncoding, headers), + getHeader(shared.HeaderContentLanguage, headers), + contentLength, + getHeader(shared.HeaderContentMD5, headers), + getHeader(shared.HeaderContentType, headers), + "", // Empty date because x-ms-date is expected (as per web page above) + getHeader(shared.HeaderIfModifiedSince, headers), + getHeader(shared.HeaderIfMatch, headers), + getHeader(shared.HeaderIfNoneMatch, headers), + getHeader(shared.HeaderIfUnmodifiedSince, headers), + getHeader(shared.HeaderRange, headers), + c.buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func getHeader(key string, headers map[string][]string) string { + if headers == nil { + return "" + } + if v, ok := headers[key]; ok { + if len(v) > 0 { + return v[0] + } + } + + return "" +} + +func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) 
string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(c.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", fmt.Errorf("failed to parse query params: %w", err) + } + + if len(params) > 0 { // There is at least 1 query parameter + var paramNames []string // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} + +// ComputeHMACSHA256 is a helper for computing the signed string outside of this package. 
+func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) { + return cred.computeHMACSHA256(message) +} + +// the following content isn't actually exported but must live +// next to SharedKeyCredential as it uses its unexported methods + +type SharedKeyCredPolicy struct { + cred *SharedKeyCredential +} + +func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy { + return &SharedKeyCredPolicy{cred: cred} +} + +func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" { + req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat)) + } + stringToSign, err := s.cred.buildStringToSign(req.Raw()) + if err != nil { + return nil, err + } + signature, err := s.cred.computeHMACSHA256(stringToSign) + if err != nil { + return nil, err + } + authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") + req.Raw().Header.Set(shared.HeaderAuthorization, authHeader) + + response, err := req.Next() + if err != nil && response != nil && response.StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") + } + return response, err +} diff --git a/sdk/storage/azfile/internal/exported/smb_property.go b/sdk/storage/azfile/internal/exported/smb_property.go new file mode 100644 index 000000000000..894e9455b760 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/smb_property.go @@ -0,0 +1,98 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "strings" + "time" +) + +// SMBProperties contains the optional parameters regarding the SMB/NTFS properties for a file. +type SMBProperties struct { + // NTFSFileAttributes for Files and Directories. Default value is 'None' for file and + // 'Directory' for directory. ‘None’ can also be specified as default. + Attributes *NTFSFileAttributes + // The Coordinated Universal Time (UTC) creation time for the file/directory. Default value is 'now'. + CreationTime *time.Time + // The Coordinated Universal Time (UTC) last write time for the file/directory. Default value is 'now'. + LastWriteTime *time.Time +} + +// Format returns file attributes, creation time and last write time. +func (sp *SMBProperties) Format(isDir bool, defaultFileAttributes string, defaultCurrentTimeString string) (fileAttributes string, creationTime string, lastWriteTime string) { + if sp == nil { + return defaultFileAttributes, defaultCurrentTimeString, defaultCurrentTimeString + } + + fileAttributes = defaultFileAttributes + if sp.Attributes != nil { + fileAttributes = sp.Attributes.String() + if fileAttributes == "" { + fileAttributes = defaultFileAttributes + } else if isDir && strings.ToLower(fileAttributes) != "none" { + // Directories need to have this attribute included, if setting any attributes. + fileAttributes += "|Directory" + } + } + + creationTime = defaultCurrentTimeString + if sp.CreationTime != nil { + creationTime = sp.CreationTime.UTC().Format(generated.ISO8601) + } + + lastWriteTime = defaultCurrentTimeString + if sp.LastWriteTime != nil { + lastWriteTime = sp.LastWriteTime.UTC().Format(generated.ISO8601) + } + + return +} + +// NTFSFileAttributes for Files and Directories. +// The subset of attributes is listed at: https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-properties#file-system-attributes. 
+type NTFSFileAttributes struct { + ReadOnly, Hidden, System, Directory, Archive, None, Temporary, Offline, NotContentIndexed, NoScrubData bool +} + +// String returns a string representation of NTFSFileAttributes. +func (f *NTFSFileAttributes) String() string { + fileAttributes := "" + if f.ReadOnly { + fileAttributes += "ReadOnly|" + } + if f.Hidden { + fileAttributes += "Hidden|" + } + if f.System { + fileAttributes += "System|" + } + if f.Directory { + fileAttributes += "Directory|" + } + if f.Archive { + fileAttributes += "Archive|" + } + if f.None { + fileAttributes += "None|" + } + if f.Temporary { + fileAttributes += "Temporary|" + } + if f.Offline { + fileAttributes += "Offline|" + } + if f.NotContentIndexed { + fileAttributes += "NotContentIndexed|" + } + if f.NoScrubData { + fileAttributes += "NoScrubData|" + } + + fileAttributes = strings.TrimSuffix(fileAttributes, "|") + return fileAttributes +} diff --git a/sdk/storage/azfile/internal/exported/transfer_validation_option.go b/sdk/storage/azfile/internal/exported/transfer_validation_option.go new file mode 100644 index 000000000000..ae8df1ea0def --- /dev/null +++ b/sdk/storage/azfile/internal/exported/transfer_validation_option.go @@ -0,0 +1,28 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "io" +) + +// TransferValidationType abstracts the various mechanisms used to verify a transfer. +type TransferValidationType interface { + Apply(io.ReadSeekCloser, generated.TransactionalContentSetter) (io.ReadSeekCloser, error) + notPubliclyImplementable() +} + +// TransferValidationTypeMD5 is a TransferValidationType used to provide a precomputed MD5. 
+type TransferValidationTypeMD5 []byte + +func (c TransferValidationTypeMD5) Apply(rsc io.ReadSeekCloser, cfg generated.TransactionalContentSetter) (io.ReadSeekCloser, error) { + cfg.SetMD5(c) + return rsc, nil +} + +func (TransferValidationTypeMD5) notPubliclyImplementable() {} diff --git a/sdk/storage/azfile/internal/exported/version.go b/sdk/storage/azfile/internal/exported/version.go new file mode 100644 index 000000000000..8e130784dbf2 --- /dev/null +++ b/sdk/storage/azfile/internal/exported/version.go @@ -0,0 +1,12 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package exported + +const ( + ModuleName = "azfile" + ModuleVersion = "v0.1.0" +) diff --git a/sdk/storage/azfile/internal/generated/autorest.md b/sdk/storage/azfile/internal/generated/autorest.md new file mode 100644 index 000000000000..634ccff33f46 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/autorest.md @@ -0,0 +1,309 @@ +# Code Generation - Azure File SDK for Golang + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/bbea558ac43d6ebec72455233c84b0158c89fcda/specification/storage/data-plane/Microsoft.FileStorage/preview/2020-10-02/file.json" +credential-scope: "https://storage.azure.com/.default" +output-folder: ../generated +file-prefix: "zz_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: true +use: "@autorest/go@4.0.0-preview.45" +``` + +### Don't include share name, directory, or file name in path - we have direct URIs + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) 
+ { + if (property.includes('/{shareName}/{directory}/{fileName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath") && false == param['$ref'].endsWith("#/parameters/FilePath"))}); + } + else if (property.includes('/{shareName}/{directory}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName") && false == param['$ref'].endsWith("#/parameters/DirectoryPath"))}); + } + else if (property.includes('/{shareName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ShareName"))}); + } + } +``` + +### Add Last-Modified to SetMetadata + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{shareName}/{directory}/{fileName}?comp=metadata"] + transform: > + $.put.responses["200"].headers["Last-Modified"] = { + "type": "string", + "format": "date-time-rfc1123", + "description": "Returns the date and time the file was last modified. Any operation that modifies the file, including an update of the file's metadata or properties, changes the last-modified time of the file." + } +``` + +### Add Content-MD5 to Put Range from URL + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{shareName}/{directory}/{fileName}?comp=range&fromURL"] + transform: > + $.put.responses["201"].headers["Content-MD5"] = { + "type": "string", + "format": "byte", + "description": "This header is returned so that the client can check for message content integrity. 
The value of this header is computed by the File service; it is not necessarily the same value as may have been specified in the request headers." + } +``` + +### Rename FileHttpHeaders to ShareFileHTTPHeaders and remove file prefix from properties + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + $.FileCacheControl["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileCacheControl["x-ms-client-name"] = "cacheControl"; + $.FileContentDisposition["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentDisposition["x-ms-client-name"] = "contentDisposition"; + $.FileContentEncoding["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentEncoding["x-ms-client-name"] = "contentEncoding"; + $.FileContentLanguage["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentLanguage["x-ms-client-name"] = "contentLanguage"; + $.FileContentMD5["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentMD5["x-ms-client-name"] = "contentMd5"; + $.FileContentType["x-ms-parameter-grouping"].name = "share-file-http-headers"; + $.FileContentType["x-ms-client-name"] = "contentType"; +``` + +### use azcore.ETag + +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. + replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`). + replace(/Etag\s+\*string/g, `ETag *azcore.ETag`); + +- from: zz_response_types.go + where: $ + transform: >- + return $. + replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`). + replace(/ETag\s+\*string/g, `ETag *azcore.ETag`); + +- from: + - zz_directory_client.go + - zz_file_client.go + - zz_share_client.go + where: $ + transform: >- + return $. 
+ replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`). + replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`); +``` + +### Rename models - remove `Share` prefix + +``` yaml +directive: +- rename-model: + from: ShareProtocolSettings + to: ProtocolSettings +- rename-model: + from: ShareSmbSettings + to: SMBSettings +``` + +### Capitalise SMB field + +``` yaml +directive: +- from: + - zz_file_client.go + - zz_models.go + where: $ + transform: >- + return $. + replace(/SmbMultichannel/g, `SMBMultichannel`). + replace(/copyFileSmbInfo/g, `copyFileSMBInfo`). + replace(/CopyFileSmbInfo/g, `CopyFileSMBInfo`). + replace(/Smb\s+\*ShareSMBSettings/g, `SMB *ShareSMBSettings`); +``` + +### Rename models - remove `Item` and `Internal` suffix + +``` yaml +directive: +- rename-model: + from: DirectoryItem + to: Directory +- rename-model: + from: FileItem + to: File +- rename-model: + from: HandleItem + to: Handle +- rename-model: + from: ShareItemInternal + to: Share +- rename-model: + from: SharePropertiesInternal + to: ShareProperties +``` + +### Remove `Items` and `List` suffix + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/DirectoryItems/g, "Directories"). + replace(/FileItems/g, "Files"). + replace(/ShareItems/g, "Shares"). + replace(/HandleList/g, "Handles"); +``` + +### Rename `FileID` to `ID` (except for Handle object) + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.Directory.properties.FileId["x-ms-client-name"] = "ID"; + $.File.properties.FileId["x-ms-client-name"] = "ID"; + $.Handle.properties.HandleId["x-ms-client-name"] = "ID"; + +- from: + - zz_directory_client.go + - zz_file_client.go + - zz_response_types.go + where: $ + transform: >- + return $. 
+ replace(/FileID/g, `ID`); +``` + + +### Change CORS acronym to be all caps and rename `FileParentID` to `ParentID` + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/Cors/g, "CORS"). + replace(/FileParentID/g, "ParentID"); +``` + +### Change cors xml to be correct + +``` yaml +directive: + - from: source-file-go + where: $ + transform: >- + return $. + replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); +``` + +### Remove pager methods and export various generated methods in service client + +``` yaml +directive: + - from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*ServiceClient\) NewListSharesSegmentPager\(.+\/\/ listSharesSegmentCreateRequest creates the ListSharesSegment request/s, `//\n// listSharesSegmentCreateRequest creates the ListSharesSegment request`). + replace(/\(client \*ServiceClient\) listSharesSegmentCreateRequest\(/, `(client *ServiceClient) ListSharesSegmentCreateRequest(`). + replace(/\(client \*ServiceClient\) listSharesSegmentHandleResponse\(/, `(client *ServiceClient) ListSharesSegmentHandleResponse(`); +``` + +### Use string type for FileCreationTime and FileLastWriteTime + +``` yaml +directive: +- from: swagger-document + where: $.parameters.FileCreationTime + transform: > + $.format = "str"; +- from: swagger-document + where: $.parameters.FileLastWriteTime + transform: > + $.format = "str"; +``` + +### Remove pager methods and export various generated methods in directory client + +``` yaml +directive: + - from: zz_directory_client.go + where: $ + transform: >- + return $. + replace(/func \(client \*DirectoryClient\) NewListFilesAndDirectoriesSegmentPager\(.+\/\/ listFilesAndDirectoriesSegmentCreateRequest creates the ListFilesAndDirectoriesSegment request/s, `//\n// listFilesAndDirectoriesSegmentCreateRequest creates the ListFilesAndDirectoriesSegment request`). 
+ replace(/\(client \*DirectoryClient\) listFilesAndDirectoriesSegmentCreateRequest\(/, `(client *DirectoryClient) ListFilesAndDirectoriesSegmentCreateRequest(`). + replace(/\(client \*DirectoryClient\) listFilesAndDirectoriesSegmentHandleResponse\(/, `(client *DirectoryClient) ListFilesAndDirectoriesSegmentHandleResponse(`); +``` + +### Fix time format for parsing the response headers: x-ms-file-creation-time, x-ms-file-last-write-time, x-ms-file-change-time + +``` yaml +directive: + - from: + - zz_directory_client.go + - zz_file_client.go + where: $ + transform: >- + return $. + replace(/fileCreationTime,\s+err\s+\:=\s+time\.Parse\(time\.RFC1123,\s+val\)/g, `fileCreationTime, err := time.Parse(ISO8601, val)`). + replace(/fileLastWriteTime,\s+err\s+\:=\s+time\.Parse\(time\.RFC1123,\s+val\)/g, `fileLastWriteTime, err := time.Parse(ISO8601, val)`). + replace(/fileChangeTime,\s+err\s+\:=\s+time\.Parse\(time\.RFC1123,\s+val\)/g, `fileChangeTime, err := time.Parse(ISO8601, val)`); +``` + +### Change `Duration` parameter in leases to be required + +``` yaml +directive: +- from: swagger-document + where: $.parameters.LeaseDuration + transform: > + $.required = true; +``` + +### Convert ShareUsageBytes to int64 + +``` yaml +directive: + - from: zz_models.go + where: $ + transform: >- + return $. + replace(/ShareUsageBytes\s+\*int32/g, `ShareUsageBytes *int64`); +``` diff --git a/sdk/storage/azfile/internal/generated/build.go b/sdk/storage/azfile/internal/generated/build.go new file mode 100644 index 000000000000..57f112001bd2 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/build.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +//go:generate autorest ./autorest.md +//go:generate gofmt -w . + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package generated diff --git a/sdk/storage/azfile/internal/generated/directory_client.go b/sdk/storage/azfile/internal/generated/directory_client.go new file mode 100644 index 000000000000..11a75a9f50c8 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/directory_client.go @@ -0,0 +1,22 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +const ( + // ISO8601 is used for formatting file creation, last write and change time. + ISO8601 = "2006-01-02T15:04:05.0000000Z07:00" +) + +func (client *DirectoryClient) Endpoint() string { + return client.endpoint +} + +func (client *DirectoryClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/sdk/storage/azfile/internal/generated/file_client.go b/sdk/storage/azfile/internal/generated/file_client.go new file mode 100644 index 000000000000..f4a01a783938 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/file_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *FileClient) Endpoint() string { + return client.endpoint +} + +func (client *FileClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/sdk/storage/azfile/internal/generated/models.go b/sdk/storage/azfile/internal/generated/models.go new file mode 100644 index 000000000000..6450b7de2e82 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/models.go @@ -0,0 +1,25 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +type TransactionalContentSetter interface { + SetMD5([]byte) + // add SetCRC64() when Azure File service starts supporting it. +} + +func (f *FileClientUploadRangeOptions) SetMD5(v []byte) { + f.ContentMD5 = v +} + +type SourceContentSetter interface { + SetSourceContentCRC64(v []byte) + // add SetSourceContentMD5() when Azure File service starts supporting it. +} + +func (f *FileClientUploadRangeFromURLOptions) SetSourceContentCRC64(v []byte) { + f.SourceContentCRC64 = v +} diff --git a/sdk/storage/azfile/internal/generated/service_client.go b/sdk/storage/azfile/internal/generated/service_client.go new file mode 100644 index 000000000000..1f449b955e82 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/service_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ServiceClient) Endpoint() string { + return client.endpoint +} + +func (client *ServiceClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/sdk/storage/azfile/internal/generated/share_client.go b/sdk/storage/azfile/internal/generated/share_client.go new file mode 100644 index 000000000000..040785814606 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/share_client.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package generated + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + +func (client *ShareClient) Endpoint() string { + return client.endpoint +} + +func (client *ShareClient) Pipeline() runtime.Pipeline { + return client.pl +} diff --git a/sdk/storage/azfile/internal/generated/zz_constants.go b/sdk/storage/azfile/internal/generated/zz_constants.go new file mode 100644 index 000000000000..13ee55aa841e --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_constants.go @@ -0,0 +1,342 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +type CopyStatusType string + +const ( + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" + CopyStatusTypeAborted CopyStatusType = "aborted" + CopyStatusTypeFailed CopyStatusType = "failed" +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{ + CopyStatusTypePending, + CopyStatusTypeSuccess, + CopyStatusTypeAborted, + CopyStatusTypeFailed, + } +} + +type DeleteSnapshotsOptionType string + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = "include" + DeleteSnapshotsOptionTypeIncludeLeased DeleteSnapshotsOptionType = "include-leased" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. 
func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
	return []DeleteSnapshotsOptionType{
		DeleteSnapshotsOptionTypeInclude,
		DeleteSnapshotsOptionTypeIncludeLeased,
	}
}

// FileRangeWriteType - wire values "update" and "clear" sent for a range write operation.
type FileRangeWriteType string

const (
	FileRangeWriteTypeUpdate FileRangeWriteType = "update"
	FileRangeWriteTypeClear  FileRangeWriteType = "clear"
)

// PossibleFileRangeWriteTypeValues returns the possible values for the FileRangeWriteType const type.
func PossibleFileRangeWriteTypeValues() []FileRangeWriteType {
	return []FileRangeWriteType{
		FileRangeWriteTypeUpdate,
		FileRangeWriteTypeClear,
	}
}

// LeaseDurationType - When a share is leased, specifies whether the lease is of infinite or fixed duration.
type LeaseDurationType string

const (
	LeaseDurationTypeInfinite LeaseDurationType = "infinite"
	LeaseDurationTypeFixed    LeaseDurationType = "fixed"
)

// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type.
func PossibleLeaseDurationTypeValues() []LeaseDurationType {
	return []LeaseDurationType{
		LeaseDurationTypeInfinite,
		LeaseDurationTypeFixed,
	}
}

// LeaseStateType - Lease state of the share.
type LeaseStateType string

const (
	LeaseStateTypeAvailable LeaseStateType = "available"
	LeaseStateTypeLeased    LeaseStateType = "leased"
	LeaseStateTypeExpired   LeaseStateType = "expired"
	LeaseStateTypeBreaking  LeaseStateType = "breaking"
	LeaseStateTypeBroken    LeaseStateType = "broken"
)

// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type.
func PossibleLeaseStateTypeValues() []LeaseStateType {
	return []LeaseStateType{
		LeaseStateTypeAvailable,
		LeaseStateTypeLeased,
		LeaseStateTypeExpired,
		LeaseStateTypeBreaking,
		LeaseStateTypeBroken,
	}
}

// LeaseStatusType - The current lease status of the share.
type LeaseStatusType string

const (
	LeaseStatusTypeLocked   LeaseStatusType = "locked"
	LeaseStatusTypeUnlocked LeaseStatusType = "unlocked"
)

// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type.
func PossibleLeaseStatusTypeValues() []LeaseStatusType {
	return []LeaseStatusType{
		LeaseStatusTypeLocked,
		LeaseStatusTypeUnlocked,
	}
}

// ListFilesIncludeType - extra properties to include when listing files/directories.
// NOTE(review): wire values are PascalCase here, unlike the lowercase share-level enums — confirm against the service spec before changing.
type ListFilesIncludeType string

const (
	ListFilesIncludeTypeTimestamps    ListFilesIncludeType = "Timestamps"
	ListFilesIncludeTypeEtag          ListFilesIncludeType = "Etag"
	ListFilesIncludeTypeAttributes    ListFilesIncludeType = "Attributes"
	ListFilesIncludeTypePermissionKey ListFilesIncludeType = "PermissionKey"
)

// PossibleListFilesIncludeTypeValues returns the possible values for the ListFilesIncludeType const type.
func PossibleListFilesIncludeTypeValues() []ListFilesIncludeType {
	return []ListFilesIncludeType{
		ListFilesIncludeTypeTimestamps,
		ListFilesIncludeTypeEtag,
		ListFilesIncludeTypeAttributes,
		ListFilesIncludeTypePermissionKey,
	}
}

// ListSharesIncludeType - extra datasets ("snapshots", "metadata", "deleted") to include when listing shares.
type ListSharesIncludeType string

const (
	ListSharesIncludeTypeSnapshots ListSharesIncludeType = "snapshots"
	ListSharesIncludeTypeMetadata  ListSharesIncludeType = "metadata"
	ListSharesIncludeTypeDeleted   ListSharesIncludeType = "deleted"
)

// PossibleListSharesIncludeTypeValues returns the possible values for the ListSharesIncludeType const type.
func PossibleListSharesIncludeTypeValues() []ListSharesIncludeType {
	return []ListSharesIncludeType{
		ListSharesIncludeTypeSnapshots,
		ListSharesIncludeTypeMetadata,
		ListSharesIncludeTypeDeleted,
	}
}

// PermissionCopyModeType - wire values "source" and "override" selecting how permissions are copied.
type PermissionCopyModeType string

const (
	PermissionCopyModeTypeSource   PermissionCopyModeType = "source"
	PermissionCopyModeTypeOverride PermissionCopyModeType = "override"
)

// PossiblePermissionCopyModeTypeValues returns the possible values for the PermissionCopyModeType const type.
StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" + StorageErrorCodeCannotDeleteFileOrDirectory StorageErrorCode = "CannotDeleteFileOrDirectory" + StorageErrorCodeClientCacheFlushDelay StorageErrorCode = "ClientCacheFlushDelay" + StorageErrorCodeConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" + StorageErrorCodeConditionNotMet StorageErrorCode = "ConditionNotMet" + StorageErrorCodeContainerQuotaDowngradeNotAllowed StorageErrorCode = "ContainerQuotaDowngradeNotAllowed" + StorageErrorCodeDeletePending StorageErrorCode = "DeletePending" + StorageErrorCodeDirectoryNotEmpty StorageErrorCode = "DirectoryNotEmpty" + StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" + StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" + StorageErrorCodeFileLockConflict StorageErrorCode = "FileLockConflict" + StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" + StorageErrorCodeInternalError StorageErrorCode = "InternalError" + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" + StorageErrorCodeInvalidFileOrDirectoryPathName StorageErrorCode = "InvalidFileOrDirectoryPathName" + StorageErrorCodeInvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" + StorageErrorCodeInvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" + StorageErrorCodeInvalidInput StorageErrorCode = "InvalidInput" + StorageErrorCodeInvalidMD5 StorageErrorCode = "InvalidMd5" + StorageErrorCodeInvalidMetadata StorageErrorCode = "InvalidMetadata" + StorageErrorCodeInvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" + StorageErrorCodeInvalidRange StorageErrorCode = "InvalidRange" + StorageErrorCodeInvalidResourceName StorageErrorCode = "InvalidResourceName" + StorageErrorCodeInvalidURI 
StorageErrorCode = "InvalidUri" + StorageErrorCodeInvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" + StorageErrorCodeInvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" + StorageErrorCodeMD5Mismatch StorageErrorCode = "Md5Mismatch" + StorageErrorCodeMetadataTooLarge StorageErrorCode = "MetadataTooLarge" + StorageErrorCodeMissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" + StorageErrorCodeMissingRequiredHeader StorageErrorCode = "MissingRequiredHeader" + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" + StorageErrorCodeMissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" + StorageErrorCodeOperationTimedOut StorageErrorCode = "OperationTimedOut" + StorageErrorCodeOutOfRangeInput StorageErrorCode = "OutOfRangeInput" + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCode = "OutOfRangeQueryParameterValue" + StorageErrorCodeParentNotFound StorageErrorCode = "ParentNotFound" + StorageErrorCodeReadOnlyAttribute StorageErrorCode = "ReadOnlyAttribute" + StorageErrorCodeRequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" + StorageErrorCodeRequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" + StorageErrorCodeResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" + StorageErrorCodeResourceNotFound StorageErrorCode = "ResourceNotFound" + StorageErrorCodeResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" + StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy" + StorageErrorCodeShareAlreadyExists StorageErrorCode = "ShareAlreadyExists" + StorageErrorCodeShareBeingDeleted StorageErrorCode = "ShareBeingDeleted" + StorageErrorCodeShareDisabled StorageErrorCode = "ShareDisabled" + StorageErrorCodeShareHasSnapshots StorageErrorCode = "ShareHasSnapshots" + StorageErrorCodeShareNotFound StorageErrorCode = 
"ShareNotFound" + StorageErrorCodeShareSnapshotCountExceeded StorageErrorCode = "ShareSnapshotCountExceeded" + StorageErrorCodeShareSnapshotInProgress StorageErrorCode = "ShareSnapshotInProgress" + StorageErrorCodeShareSnapshotOperationNotSupported StorageErrorCode = "ShareSnapshotOperationNotSupported" + StorageErrorCodeSharingViolation StorageErrorCode = "SharingViolation" + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" + StorageErrorCodeUnsupportedHeader StorageErrorCode = "UnsupportedHeader" + StorageErrorCodeUnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" + StorageErrorCodeUnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type. +func PossibleStorageErrorCodeValues() []StorageErrorCode { + return []StorageErrorCode{ + StorageErrorCodeAccountAlreadyExists, + StorageErrorCodeAccountBeingCreated, + StorageErrorCodeAccountIsDisabled, + StorageErrorCodeAuthenticationFailed, + StorageErrorCodeAuthorizationFailure, + StorageErrorCodeAuthorizationPermissionMismatch, + StorageErrorCodeAuthorizationProtocolMismatch, + StorageErrorCodeAuthorizationResourceTypeMismatch, + StorageErrorCodeAuthorizationServiceMismatch, + StorageErrorCodeAuthorizationSourceIPMismatch, + StorageErrorCodeCannotDeleteFileOrDirectory, + StorageErrorCodeClientCacheFlushDelay, + StorageErrorCodeConditionHeadersNotSupported, + StorageErrorCodeConditionNotMet, + StorageErrorCodeContainerQuotaDowngradeNotAllowed, + StorageErrorCodeDeletePending, + StorageErrorCodeDirectoryNotEmpty, + StorageErrorCodeEmptyMetadataKey, + StorageErrorCodeFeatureVersionMismatch, + StorageErrorCodeFileLockConflict, + StorageErrorCodeInsufficientAccountPermissions, + StorageErrorCodeInternalError, + StorageErrorCodeInvalidAuthenticationInfo, + StorageErrorCodeInvalidFileOrDirectoryPathName, + StorageErrorCodeInvalidHTTPVerb, + StorageErrorCodeInvalidHeaderValue, 
+ StorageErrorCodeInvalidInput, + StorageErrorCodeInvalidMD5, + StorageErrorCodeInvalidMetadata, + StorageErrorCodeInvalidQueryParameterValue, + StorageErrorCodeInvalidRange, + StorageErrorCodeInvalidResourceName, + StorageErrorCodeInvalidURI, + StorageErrorCodeInvalidXMLDocument, + StorageErrorCodeInvalidXMLNodeValue, + StorageErrorCodeMD5Mismatch, + StorageErrorCodeMetadataTooLarge, + StorageErrorCodeMissingContentLengthHeader, + StorageErrorCodeMissingRequiredHeader, + StorageErrorCodeMissingRequiredQueryParameter, + StorageErrorCodeMissingRequiredXMLNode, + StorageErrorCodeMultipleConditionHeadersNotSupported, + StorageErrorCodeOperationTimedOut, + StorageErrorCodeOutOfRangeInput, + StorageErrorCodeOutOfRangeQueryParameterValue, + StorageErrorCodeParentNotFound, + StorageErrorCodeReadOnlyAttribute, + StorageErrorCodeRequestBodyTooLarge, + StorageErrorCodeRequestURLFailedToParse, + StorageErrorCodeResourceAlreadyExists, + StorageErrorCodeResourceNotFound, + StorageErrorCodeResourceTypeMismatch, + StorageErrorCodeServerBusy, + StorageErrorCodeShareAlreadyExists, + StorageErrorCodeShareBeingDeleted, + StorageErrorCodeShareDisabled, + StorageErrorCodeShareHasSnapshots, + StorageErrorCodeShareNotFound, + StorageErrorCodeShareSnapshotCountExceeded, + StorageErrorCodeShareSnapshotInProgress, + StorageErrorCodeShareSnapshotOperationNotSupported, + StorageErrorCodeSharingViolation, + StorageErrorCodeUnsupportedHTTPVerb, + StorageErrorCodeUnsupportedHeader, + StorageErrorCodeUnsupportedQueryParameter, + StorageErrorCodeUnsupportedXMLNode, + } +} diff --git a/sdk/storage/azfile/internal/generated/zz_directory_client.go b/sdk/storage/azfile/internal/generated/zz_directory_client.go new file mode 100644 index 000000000000..1b1eed71d03f --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_directory_client.go @@ -0,0 +1,766 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/http" + "strconv" + "strings" + "time" +) + +// DirectoryClient contains the methods for the Directory group. +// Don't use this type directly, use NewDirectoryClient() instead. +type DirectoryClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewDirectoryClient creates a new instance of DirectoryClient with the specified values. +// - endpoint - The URL of the service account, share, directory or file that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewDirectoryClient(endpoint string, pl runtime.Pipeline) *DirectoryClient { + client := &DirectoryClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// Create - Creates a new directory under the specified share or parent directory. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - fileAttributes - If specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and ‘Directory’ +// for directory. ‘None’ can also be specified as default. +// - fileCreationTime - Creation time for the file/directory. Default value: Now. +// - fileLastWriteTime - Last write time for the file/directory. Default value: Now. +// - options - DirectoryClientCreateOptions contains the optional parameters for the DirectoryClient.Create method. 
func (client *DirectoryClient) Create(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *DirectoryClientCreateOptions) (DirectoryClientCreateResponse, error) {
	req, err := client.createCreateRequest(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, options)
	if err != nil {
		return DirectoryClientCreateResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return DirectoryClientCreateResponse{}, err
	}
	// The service acknowledges directory creation with 201 Created; anything else becomes a ResponseError.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return DirectoryClientCreateResponse{}, runtime.NewResponseError(resp)
	}
	return client.createHandleResponse(resp)
}

// createCreateRequest creates the Create request.
func (client *DirectoryClient) createCreateRequest(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *DirectoryClientCreateOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	// restype=directory distinguishes this PUT from other operations on the same resource URL.
	reqQP.Set("restype", "directory")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.Metadata != nil {
		// Each metadata pair goes out as its own x-ms-meta-<key> header; nil values are skipped.
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.FilePermission != nil {
		req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission}
	}
	if options != nil && options.FilePermissionKey != nil {
		req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey}
	}
	// Required SMB properties; callers pass the string forms directly (see the FileCreationTime/FileLastWriteTime "str" directive in autorest.md).
	req.Raw().Header["x-ms-file-attributes"] = []string{fileAttributes}
	req.Raw().Header["x-ms-file-creation-time"] = []string{fileCreationTime}
	req.Raw().Header["x-ms-file-last-write-time"] = []string{fileLastWriteTime}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// createHandleResponse handles the Create response.
func (client *DirectoryClient) createHandleResponse(resp *http.Response) (DirectoryClientCreateResponse, error) {
	result := DirectoryClientCreateResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		// Standard HTTP date headers (Last-Modified, Date) are RFC 1123 formatted.
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientCreateResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientCreateResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return DirectoryClientCreateResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-file-permission-key"); val != "" {
		result.FilePermissionKey = &val
	}
	if val := resp.Header.Get("x-ms-file-attributes"); val != "" {
		result.FileAttributes = &val
	}
	if val := resp.Header.Get("x-ms-file-creation-time"); val != "" {
		// x-ms-file-* timestamps use the ISO8601 layout, not RFC 1123 (layout fixed up by the autorest.md directive).
		fileCreationTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientCreateResponse{}, err
		}
		result.FileCreationTime = &fileCreationTime
	}
	if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" {
		fileLastWriteTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientCreateResponse{}, err
		}
		result.FileLastWriteTime = &fileLastWriteTime
	}
	if val := resp.Header.Get("x-ms-file-change-time"); val != "" {
		fileChangeTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientCreateResponse{}, err
		}
		result.FileChangeTime = &fileChangeTime
	}
	if val := resp.Header.Get("x-ms-file-id"); val != "" {
		result.ID = &val
	}
	if val := resp.Header.Get("x-ms-file-parent-id"); val != "" {
		result.ParentID = &val
	}
	return result, nil
}

// Delete - Removes the specified empty directory. Note that the directory must be empty before it can be deleted.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - DirectoryClientDeleteOptions contains the optional parameters for the DirectoryClient.Delete method.
func (client *DirectoryClient) Delete(ctx context.Context, options *DirectoryClientDeleteOptions) (DirectoryClientDeleteResponse, error) {
	req, err := client.deleteCreateRequest(ctx, options)
	if err != nil {
		return DirectoryClientDeleteResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return DirectoryClientDeleteResponse{}, err
	}
	// Delete is acknowledged with 202 Accepted (not 200/204).
	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
		return DirectoryClientDeleteResponse{}, runtime.NewResponseError(resp)
	}
	return client.deleteHandleResponse(resp)
}

// deleteCreateRequest creates the Delete request.
func (client *DirectoryClient) deleteCreateRequest(ctx context.Context, options *DirectoryClientDeleteOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "directory")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// deleteHandleResponse handles the Delete response.
+func (client *DirectoryClient) deleteHandleResponse(resp *http.Response) (DirectoryClientDeleteResponse, error) { + result := DirectoryClientDeleteResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ForceCloseHandles - Closes all handles open for given directory. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - handleID - Specifies handle ID opened on the file or directory to be closed. Asterisk (‘*’) is a wildcard that specifies +// all handles. +// - options - DirectoryClientForceCloseHandlesOptions contains the optional parameters for the DirectoryClient.ForceCloseHandles +// method. +func (client *DirectoryClient) ForceCloseHandles(ctx context.Context, handleID string, options *DirectoryClientForceCloseHandlesOptions) (DirectoryClientForceCloseHandlesResponse, error) { + req, err := client.forceCloseHandlesCreateRequest(ctx, handleID, options) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DirectoryClientForceCloseHandlesResponse{}, runtime.NewResponseError(resp) + } + return client.forceCloseHandlesHandleResponse(resp) +} + +// forceCloseHandlesCreateRequest creates the ForceCloseHandles request. 
+func (client *DirectoryClient) forceCloseHandlesCreateRequest(ctx context.Context, handleID string, options *DirectoryClientForceCloseHandlesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "forceclosehandles") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-handle-id"] = []string{handleID} + if options != nil && options.Recursive != nil { + req.Raw().Header["x-ms-recursive"] = []string{strconv.FormatBool(*options.Recursive)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// forceCloseHandlesHandleResponse handles the ForceCloseHandles response. 
+func (client *DirectoryClient) forceCloseHandlesHandleResponse(resp *http.Response) (DirectoryClientForceCloseHandlesResponse, error) { + result := DirectoryClientForceCloseHandlesResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-marker"); val != "" { + result.Marker = &val + } + if val := resp.Header.Get("x-ms-number-of-handles-closed"); val != "" { + numberOfHandlesClosed32, err := strconv.ParseInt(val, 10, 32) + numberOfHandlesClosed := int32(numberOfHandlesClosed32) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + result.NumberOfHandlesClosed = &numberOfHandlesClosed + } + if val := resp.Header.Get("x-ms-number-of-handles-failed"); val != "" { + numberOfHandlesFailedToClose32, err := strconv.ParseInt(val, 10, 32) + numberOfHandlesFailedToClose := int32(numberOfHandlesFailedToClose32) + if err != nil { + return DirectoryClientForceCloseHandlesResponse{}, err + } + result.NumberOfHandlesFailedToClose = &numberOfHandlesFailedToClose + } + return result, nil +} + +// GetProperties - Returns all system properties for the specified directory, and can also be used to check the existence +// of a directory. The data returned does not include the files in the directory or any +// subdirectories. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - DirectoryClientGetPropertiesOptions contains the optional parameters for the DirectoryClient.GetProperties method. 
+func (client *DirectoryClient) GetProperties(ctx context.Context, options *DirectoryClientGetPropertiesOptions) (DirectoryClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return DirectoryClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DirectoryClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *DirectoryClient) getPropertiesCreateRequest(ctx context.Context, options *DirectoryClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "directory") + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
func (client *DirectoryClient) getPropertiesHandleResponse(resp *http.Response) (DirectoryClientGetPropertiesResponse, error) {
	result := DirectoryClientGetPropertiesResponse{}
	// Collect user metadata: every header whose name starts with "x-ms-meta-"
	// (case-insensitive) becomes a map entry keyed by the remainder of the name.
	for hh := range resp.Header {
		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
			if result.Metadata == nil {
				result.Metadata = map[string]*string{}
			}
			result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh))
		}
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-file-attributes"); val != "" {
		result.FileAttributes = &val
	}
	// x-ms-file-* timestamps use the service's ISO8601 format, unlike the
	// RFC1123 HTTP headers above.
	if val := resp.Header.Get("x-ms-file-creation-time"); val != "" {
		fileCreationTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.FileCreationTime = &fileCreationTime
	}
	if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" {
		fileLastWriteTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.FileLastWriteTime = &fileLastWriteTime
	}
	if val := resp.Header.Get("x-ms-file-change-time"); val != "" {
		fileChangeTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientGetPropertiesResponse{}, err
		}
		result.FileChangeTime = &fileChangeTime
	}
	if val := resp.Header.Get("x-ms-file-permission-key"); val != "" {
		result.FilePermissionKey = &val
	}
	if val := resp.Header.Get("x-ms-file-id"); val != "" {
		result.ID = &val
	}
	if val := resp.Header.Get("x-ms-file-parent-id"); val != "" {
		result.ParentID = &val
	}
	return result, nil
}

// NewListFilesAndDirectoriesSegmentPager - Returns a list of files or directories under the specified share or directory.
// It lists the contents only for a single level of the directory hierarchy.
//
// Generated from API version 2020-10-02
// - options - DirectoryClientListFilesAndDirectoriesSegmentOptions contains the optional parameters for the DirectoryClient.NewListFilesAndDirectoriesSegmentPager
// method.
//
// listFilesAndDirectoriesSegmentCreateRequest creates the ListFilesAndDirectoriesSegment request.
+func (client *DirectoryClient) ListFilesAndDirectoriesSegmentCreateRequest(ctx context.Context, options *DirectoryClientListFilesAndDirectoriesSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "directory") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.IncludeExtendedInfo != nil { + req.Raw().Header["x-ms-file-extended-info"] = []string{strconv.FormatBool(*options.IncludeExtendedInfo)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listFilesAndDirectoriesSegmentHandleResponse handles the ListFilesAndDirectoriesSegment response. 
+func (client *DirectoryClient) ListFilesAndDirectoriesSegmentHandleResponse(resp *http.Response) (DirectoryClientListFilesAndDirectoriesSegmentResponse, error) { + result := DirectoryClientListFilesAndDirectoriesSegmentResponse{} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientListFilesAndDirectoriesSegmentResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListFilesAndDirectoriesSegmentResponse); err != nil { + return DirectoryClientListFilesAndDirectoriesSegmentResponse{}, err + } + return result, nil +} + +// ListHandles - Lists handles for directory. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - DirectoryClientListHandlesOptions contains the optional parameters for the DirectoryClient.ListHandles method. +func (client *DirectoryClient) ListHandles(ctx context.Context, options *DirectoryClientListHandlesOptions) (DirectoryClientListHandlesResponse, error) { + req, err := client.listHandlesCreateRequest(ctx, options) + if err != nil { + return DirectoryClientListHandlesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientListHandlesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DirectoryClientListHandlesResponse{}, runtime.NewResponseError(resp) + } + return client.listHandlesHandleResponse(resp) +} + +// listHandlesCreateRequest creates the ListHandles request. 
+func (client *DirectoryClient) listHandlesCreateRequest(ctx context.Context, options *DirectoryClientListHandlesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "listhandles") + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Recursive != nil { + req.Raw().Header["x-ms-recursive"] = []string{strconv.FormatBool(*options.Recursive)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listHandlesHandleResponse handles the ListHandles response. 
+func (client *DirectoryClient) listHandlesHandleResponse(resp *http.Response) (DirectoryClientListHandlesResponse, error) { + result := DirectoryClientListHandlesResponse{} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return DirectoryClientListHandlesResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListHandlesResponse); err != nil { + return DirectoryClientListHandlesResponse{}, err + } + return result, nil +} + +// SetMetadata - Updates user defined metadata for the specified directory. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - DirectoryClientSetMetadataOptions contains the optional parameters for the DirectoryClient.SetMetadata method. +func (client *DirectoryClient) SetMetadata(ctx context.Context, options *DirectoryClientSetMetadataOptions) (DirectoryClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options) + if err != nil { + return DirectoryClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return DirectoryClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return DirectoryClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. 
func (client *DirectoryClient) setMetadataCreateRequest(ctx context.Context, options *DirectoryClientSetMetadataOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "directory")
	reqQP.Set("comp", "metadata")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if options != nil && options.Metadata != nil {
		// Each user metadata pair is sent as an x-ms-meta-<key> header; nil
		// values are skipped.
		for k, v := range options.Metadata {
			if v != nil {
				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
			}
		}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// setMetadataHandleResponse handles the SetMetadata response.
func (client *DirectoryClient) setMetadataHandleResponse(resp *http.Response) (DirectoryClientSetMetadataResponse, error) {
	result := DirectoryClientSetMetadataResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientSetMetadataResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return DirectoryClientSetMetadataResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	return result, nil
}

// SetProperties - Sets properties on the directory.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - fileAttributes - If specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and ‘Directory’
// for directory. ‘None’ can also be specified as default.
// - fileCreationTime - Creation time for the file/directory. Default value: Now.
// - fileLastWriteTime - Last write time for the file/directory. Default value: Now.
// - options - DirectoryClientSetPropertiesOptions contains the optional parameters for the DirectoryClient.SetProperties method.
func (client *DirectoryClient) SetProperties(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *DirectoryClientSetPropertiesOptions) (DirectoryClientSetPropertiesResponse, error) {
	req, err := client.setPropertiesCreateRequest(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, options)
	if err != nil {
		return DirectoryClientSetPropertiesResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return DirectoryClientSetPropertiesResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return DirectoryClientSetPropertiesResponse{}, runtime.NewResponseError(resp)
	}
	return client.setPropertiesHandleResponse(resp)
}

// setPropertiesCreateRequest creates the SetProperties request.
+func (client *DirectoryClient) setPropertiesCreateRequest(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *DirectoryClientSetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "directory") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.FilePermission != nil { + req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission} + } + if options != nil && options.FilePermissionKey != nil { + req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey} + } + req.Raw().Header["x-ms-file-attributes"] = []string{fileAttributes} + req.Raw().Header["x-ms-file-creation-time"] = []string{fileCreationTime} + req.Raw().Header["x-ms-file-last-write-time"] = []string{fileLastWriteTime} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setPropertiesHandleResponse handles the SetProperties response. 
func (client *DirectoryClient) setPropertiesHandleResponse(resp *http.Response) (DirectoryClientSetPropertiesResponse, error) {
	result := DirectoryClientSetPropertiesResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-file-permission-key"); val != "" {
		result.FilePermissionKey = &val
	}
	if val := resp.Header.Get("x-ms-file-attributes"); val != "" {
		result.FileAttributes = &val
	}
	// x-ms-file-* timestamps use the service's ISO8601 format, unlike the
	// RFC1123 HTTP headers above.
	if val := resp.Header.Get("x-ms-file-creation-time"); val != "" {
		fileCreationTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.FileCreationTime = &fileCreationTime
	}
	if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" {
		fileLastWriteTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.FileLastWriteTime = &fileLastWriteTime
	}
	if val := resp.Header.Get("x-ms-file-change-time"); val != "" {
		fileChangeTime, err := time.Parse(ISO8601, val)
		if err != nil {
			return DirectoryClientSetPropertiesResponse{}, err
		}
		result.FileChangeTime = &fileChangeTime
	}
	if val := resp.Header.Get("x-ms-file-id"); val != "" {
		result.ID = &val
	}
	if val := resp.Header.Get("x-ms-file-parent-id"); val != "" {
		result.ParentID = &val
	}
	return result, nil
}
diff --git a/sdk/storage/azfile/internal/generated/zz_file_client.go b/sdk/storage/azfile/internal/generated/zz_file_client.go
new file mode 100644
index 000000000000..cfe2ea780a3b
--- /dev/null
+++ b/sdk/storage/azfile/internal/generated/zz_file_client.go
@@ -0,0 +1,1826 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated

import (
	"context"
	"encoding/base64"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// FileClient contains the methods for the File group.
// Don't use this type directly, use NewFileClient() instead.
type FileClient struct {
	// endpoint is the URL of the target account/share/directory/file.
	endpoint string
	// pl is the pipeline used for sending requests and handling responses.
	pl runtime.Pipeline
}

// NewFileClient creates a new instance of FileClient with the specified values.
// - endpoint - The URL of the service account, share, directory or file that is the target of the desired operation.
// - pl - the pipeline used for sending requests and handling responses.
+func NewFileClient(endpoint string, pl runtime.Pipeline) *FileClient { + client := &FileClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AbortCopy - Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy File operation. +// - options - FileClientAbortCopyOptions contains the optional parameters for the FileClient.AbortCopy method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) AbortCopy(ctx context.Context, copyID string, options *FileClientAbortCopyOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientAbortCopyResponse, error) { + req, err := client.abortCopyCreateRequest(ctx, copyID, options, leaseAccessConditions) + if err != nil { + return FileClientAbortCopyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientAbortCopyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return FileClientAbortCopyResponse{}, runtime.NewResponseError(resp) + } + return client.abortCopyHandleResponse(resp) +} + +// abortCopyCreateRequest creates the AbortCopy request. 
func (client *FileClient) abortCopyCreateRequest(ctx context.Context, copyID string, options *FileClientAbortCopyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "copy")
	reqQP.Set("copyid", copyID)
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// The abort action is selected via this header on the comp=copy resource.
	req.Raw().Header["x-ms-copy-action"] = []string{"abort"}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// abortCopyHandleResponse handles the AbortCopy response.
func (client *FileClient) abortCopyHandleResponse(resp *http.Response) (FileClientAbortCopyResponse, error) {
	result := FileClientAbortCopyResponse{}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return FileClientAbortCopyResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// AcquireLease - [Update] The Lease File operation establishes and manages a lock on a file for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
// lease can be between 15 and 60 seconds. A lease duration cannot be changed using
// renew or change.
// - options - FileClientAcquireLeaseOptions contains the optional parameters for the FileClient.AcquireLease method.
func (client *FileClient) AcquireLease(ctx context.Context, duration int32, options *FileClientAcquireLeaseOptions) (FileClientAcquireLeaseResponse, error) {
	req, err := client.acquireLeaseCreateRequest(ctx, duration, options)
	if err != nil {
		return FileClientAcquireLeaseResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return FileClientAcquireLeaseResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return FileClientAcquireLeaseResponse{}, runtime.NewResponseError(resp)
	}
	return client.acquireLeaseHandleResponse(resp)
}

// acquireLeaseCreateRequest creates the AcquireLease request.
func (client *FileClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *FileClientAcquireLeaseOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "lease")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-lease-action"] = []string{"acquire"}
	req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)}
	// The caller may propose a specific lease ID; otherwise the service
	// generates one and returns it in x-ms-lease-id.
	if options != nil && options.ProposedLeaseID != nil {
		req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// acquireLeaseHandleResponse handles the AcquireLease response.
+func (client *FileClient) acquireLeaseHandleResponse(resp *http.Response) (FileClientAcquireLeaseResponse, error) { + result := FileClientAcquireLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - [Update] The Lease File operation establishes and manages a lock on a file for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientBreakLeaseOptions contains the optional parameters for the FileClient.BreakLease method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *FileClient) BreakLease(ctx context.Context, options *FileClientBreakLeaseOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return FileClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. +func (client *FileClient) breakLeaseCreateRequest(ctx context.Context, options *FileClientBreakLeaseOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. 
+func (client *FileClient) breakLeaseHandleResponse(resp *http.Response) (FileClientBreakLeaseResponse, error) { + result := FileClientBreakLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] The Lease File operation establishes and manages a lock on a file for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - options - FileClientChangeLeaseOptions contains the optional parameters for the FileClient.ChangeLease method. 
+func (client *FileClient) ChangeLease(ctx context.Context, leaseID string, options *FileClientChangeLeaseOptions) (FileClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, options) + if err != nil { + return FileClientChangeLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *FileClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, options *FileClientChangeLeaseOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"change"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + if options != nil && options.ProposedLeaseID != nil { + req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *FileClient) changeLeaseHandleResponse(resp *http.Response) (FileClientChangeLeaseResponse, error) { + result := FileClientChangeLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Create - Creates a new file or replaces a file. Note it only initializes the file with no content. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - fileContentLength - Specifies the maximum size for the file, up to 4 TB. +// - fileAttributes - If specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and ‘Directory’ +// for directory. ‘None’ can also be specified as default. +// - fileCreationTime - Creation time for the file/directory. Default value: Now. +// - fileLastWriteTime - Last write time for the file/directory. Default value: Now. +// - options - FileClientCreateOptions contains the optional parameters for the FileClient.Create method. +// - ShareFileHTTPHeaders - ShareFileHTTPHeaders contains a group of parameters for the FileClient.Create method. 
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) Create(ctx context.Context, fileContentLength int64, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *FileClientCreateOptions, shareFileHTTPHeaders *ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) (FileClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, fileContentLength, fileAttributes, fileCreationTime, fileLastWriteTime, options, shareFileHTTPHeaders, leaseAccessConditions) + if err != nil { + return FileClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return FileClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. +func (client *FileClient) createCreateRequest(ctx context.Context, fileContentLength int64, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *FileClientCreateOptions, shareFileHTTPHeaders *ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-content-length"] = []string{strconv.FormatInt(fileContentLength, 10)} + req.Raw().Header["x-ms-type"] = []string{"file"} + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentType != nil { + req.Raw().Header["x-ms-content-type"] = []string{*shareFileHTTPHeaders.ContentType} + } 
+ if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentEncoding != nil { + req.Raw().Header["x-ms-content-encoding"] = []string{*shareFileHTTPHeaders.ContentEncoding} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentLanguage != nil { + req.Raw().Header["x-ms-content-language"] = []string{*shareFileHTTPHeaders.ContentLanguage} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.CacheControl != nil { + req.Raw().Header["x-ms-cache-control"] = []string{*shareFileHTTPHeaders.CacheControl} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentMD5 != nil { + req.Raw().Header["x-ms-content-md5"] = []string{base64.StdEncoding.EncodeToString(shareFileHTTPHeaders.ContentMD5)} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentDisposition != nil { + req.Raw().Header["x-ms-content-disposition"] = []string{*shareFileHTTPHeaders.ContentDisposition} + } + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + if options != nil && options.FilePermission != nil { + req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission} + } + if options != nil && options.FilePermissionKey != nil { + req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey} + } + req.Raw().Header["x-ms-file-attributes"] = []string{fileAttributes} + req.Raw().Header["x-ms-file-creation-time"] = []string{fileCreationTime} + req.Raw().Header["x-ms-file-last-write-time"] = []string{fileLastWriteTime} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// createHandleResponse handles the Create response. 
+func (client *FileClient) createHandleResponse(resp *http.Response) (FileClientCreateResponse, error) { + result := FileClientCreateResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-file-permission-key"); val != "" { + result.FilePermissionKey = &val + } + if val := resp.Header.Get("x-ms-file-attributes"); val != "" { + result.FileAttributes = &val + } + if val := resp.Header.Get("x-ms-file-creation-time"); val != "" { + fileCreationTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientCreateResponse{}, err + } + result.FileCreationTime = &fileCreationTime + } + if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" { + fileLastWriteTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientCreateResponse{}, err + } + result.FileLastWriteTime = &fileLastWriteTime + } + if val := resp.Header.Get("x-ms-file-change-time"); val != "" { + fileChangeTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientCreateResponse{}, err + } + result.FileChangeTime = &fileChangeTime + } + if val := resp.Header.Get("x-ms-file-id"); val 
!= "" { + result.ID = &val + } + if val := resp.Header.Get("x-ms-file-parent-id"); val != "" { + result.ParentID = &val + } + return result, nil +} + +// Delete - removes the file from the storage account. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientDeleteOptions contains the optional parameters for the FileClient.Delete method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) Delete(ctx context.Context, options *FileClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return FileClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. 
+func (client *FileClient) deleteCreateRequest(ctx context.Context, options *FileClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// deleteHandleResponse handles the Delete response. +func (client *FileClient) deleteHandleResponse(resp *http.Response) (FileClientDeleteResponse, error) { + result := FileClientDeleteResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Download - Reads or downloads a file from the system, including its metadata and properties. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientDownloadOptions contains the optional parameters for the FileClient.Download method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *FileClient) Download(ctx context.Context, options *FileClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientDownloadResponse, error) { + req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientDownloadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientDownloadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { + return FileClientDownloadResponse{}, runtime.NewResponseError(resp) + } + return client.downloadHandleResponse(resp) +} + +// downloadCreateRequest creates the Download request. +func (client *FileClient) downloadCreateRequest(ctx context.Context, options *FileClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if options != nil && options.RangeGetContentMD5 != nil { + req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// downloadHandleResponse handles the Download response. 
+func (client *FileClient) downloadHandleResponse(resp *http.Response) (FileClientDownloadResponse, error) { + result := FileClientDownloadResponse{Body: resp.Body} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + 
result.AcceptRanges = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-content-md5"); val != "" { + fileContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.FileContentMD5 = fileContentMD5 + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-file-attributes"); val != "" { + result.FileAttributes = &val + } + if val := resp.Header.Get("x-ms-file-creation-time"); val != "" { + fileCreationTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.FileCreationTime = &fileCreationTime + } + if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" { + fileLastWriteTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.FileLastWriteTime = &fileLastWriteTime + } 
+ if val := resp.Header.Get("x-ms-file-change-time"); val != "" { + fileChangeTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientDownloadResponse{}, err + } + result.FileChangeTime = &fileChangeTime + } + if val := resp.Header.Get("x-ms-file-permission-key"); val != "" { + result.FilePermissionKey = &val + } + if val := resp.Header.Get("x-ms-file-id"); val != "" { + result.ID = &val + } + if val := resp.Header.Get("x-ms-file-parent-id"); val != "" { + result.ParentID = &val + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + return result, nil +} + +// ForceCloseHandles - Closes all handles open for given file +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - handleID - Specifies handle ID opened on the file or directory to be closed. Asterisk (‘*’) is a wildcard that specifies +// all handles. +// - options - FileClientForceCloseHandlesOptions contains the optional parameters for the FileClient.ForceCloseHandles method. 
+func (client *FileClient) ForceCloseHandles(ctx context.Context, handleID string, options *FileClientForceCloseHandlesOptions) (FileClientForceCloseHandlesResponse, error) { + req, err := client.forceCloseHandlesCreateRequest(ctx, handleID, options) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientForceCloseHandlesResponse{}, runtime.NewResponseError(resp) + } + return client.forceCloseHandlesHandleResponse(resp) +} + +// forceCloseHandlesCreateRequest creates the ForceCloseHandles request. +func (client *FileClient) forceCloseHandlesCreateRequest(ctx context.Context, handleID string, options *FileClientForceCloseHandlesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "forceclosehandles") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-handle-id"] = []string{handleID} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// forceCloseHandlesHandleResponse handles the ForceCloseHandles response. 
+func (client *FileClient) forceCloseHandlesHandleResponse(resp *http.Response) (FileClientForceCloseHandlesResponse, error) { + result := FileClientForceCloseHandlesResponse{} + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-marker"); val != "" { + result.Marker = &val + } + if val := resp.Header.Get("x-ms-number-of-handles-closed"); val != "" { + numberOfHandlesClosed32, err := strconv.ParseInt(val, 10, 32) + numberOfHandlesClosed := int32(numberOfHandlesClosed32) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + result.NumberOfHandlesClosed = &numberOfHandlesClosed + } + if val := resp.Header.Get("x-ms-number-of-handles-failed"); val != "" { + numberOfHandlesFailedToClose32, err := strconv.ParseInt(val, 10, 32) + numberOfHandlesFailedToClose := int32(numberOfHandlesFailedToClose32) + if err != nil { + return FileClientForceCloseHandlesResponse{}, err + } + result.NumberOfHandlesFailedToClose = &numberOfHandlesFailedToClose + } + return result, nil +} + +// GetProperties - Returns all user-defined metadata, standard HTTP properties, and system properties for the file. It does +// not return the content of the file. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientGetPropertiesOptions contains the optional parameters for the FileClient.GetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *FileClient) GetProperties(ctx context.Context, options *FileClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *FileClient) getPropertiesCreateRequest(ctx context.Context, options *FileClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *FileClient) getPropertiesHandleResponse(resp *http.Response) (FileClientGetPropertiesResponse, error) { + result := FileClientGetPropertiesResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-type"); val != "" { + result.FileType = &val + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, 
err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-file-attributes"); val != "" { + result.FileAttributes = &val + } + if val := resp.Header.Get("x-ms-file-creation-time"); val != "" { + fileCreationTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.FileCreationTime = &fileCreationTime + } + if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" { + fileLastWriteTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.FileLastWriteTime = &fileLastWriteTime + } + if val := resp.Header.Get("x-ms-file-change-time"); val != "" { + fileChangeTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientGetPropertiesResponse{}, err + } + result.FileChangeTime = &fileChangeTime + } + if val := resp.Header.Get("x-ms-file-permission-key"); val != 
"" { + result.FilePermissionKey = &val + } + if val := resp.Header.Get("x-ms-file-id"); val != "" { + result.ID = &val + } + if val := resp.Header.Get("x-ms-file-parent-id"); val != "" { + result.ParentID = &val + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + return result, nil +} + +// GetRangeList - Returns the list of valid ranges for a file. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientGetRangeListOptions contains the optional parameters for the FileClient.GetRangeList method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) GetRangeList(ctx context.Context, options *FileClientGetRangeListOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientGetRangeListResponse, error) { + req, err := client.getRangeListCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientGetRangeListResponse{}, runtime.NewResponseError(resp) + } + return client.getRangeListHandleResponse(resp) +} + +// getRangeListCreateRequest creates the GetRangeList request. 
+func (client *FileClient) getRangeListCreateRequest(ctx context.Context, options *FileClientGetRangeListOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "rangelist") + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + if options != nil && options.Prevsharesnapshot != nil { + reqQP.Set("prevsharesnapshot", *options.Prevsharesnapshot) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.Range != nil { + req.Raw().Header["x-ms-range"] = []string{*options.Range} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// getRangeListHandleResponse handles the GetRangeList response. 
+func (client *FileClient) getRangeListHandleResponse(resp *http.Response) (FileClientGetRangeListResponse, error) { + result := FileClientGetRangeListResponse{} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-content-length"); val != "" { + fileContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + result.FileContentLength = &fileContentLength + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientGetRangeListResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ShareFileRangeList); err != nil { + return FileClientGetRangeListResponse{}, err + } + return result, nil +} + +// ListHandles - Lists handles for file +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientListHandlesOptions contains the optional parameters for the FileClient.ListHandles method. 
+func (client *FileClient) ListHandles(ctx context.Context, options *FileClientListHandlesOptions) (FileClientListHandlesResponse, error) { + req, err := client.listHandlesCreateRequest(ctx, options) + if err != nil { + return FileClientListHandlesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientListHandlesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientListHandlesResponse{}, runtime.NewResponseError(resp) + } + return client.listHandlesHandleResponse(resp) +} + +// listHandlesCreateRequest creates the ListHandles request. +func (client *FileClient) listHandlesCreateRequest(ctx context.Context, options *FileClientListHandlesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "listhandles") + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// listHandlesHandleResponse handles the ListHandles response. 
+func (client *FileClient) listHandlesHandleResponse(resp *http.Response) (FileClientListHandlesResponse, error) { + result := FileClientListHandlesResponse{} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientListHandlesResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListHandlesResponse); err != nil { + return FileClientListHandlesResponse{}, err + } + return result, nil +} + +// ReleaseLease - [Update] The Lease File operation establishes and manages a lock on a file for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - options - FileClientReleaseLeaseOptions contains the optional parameters for the FileClient.ReleaseLease method. +func (client *FileClient) ReleaseLease(ctx context.Context, leaseID string, options *FileClientReleaseLeaseOptions) (FileClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options) + if err != nil { + return FileClientReleaseLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseLeaseHandleResponse(resp) +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. 
+func (client *FileClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *FileClientReleaseLeaseOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"release"} + req.Raw().Header["x-ms-lease-id"] = []string{leaseID} + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *FileClient) releaseLeaseHandleResponse(resp *http.Response) (FileClientReleaseLeaseResponse, error) { + result := FileClientReleaseLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetHTTPHeaders - Sets HTTP headers on the file. 
+// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - fileAttributes - If specified, the provided file attributes shall be set. Default value: ‘Archive’ for file and ‘Directory’ +// for directory. ‘None’ can also be specified as default. +// - fileCreationTime - Creation time for the file/directory. Default value: Now. +// - fileLastWriteTime - Last write time for the file/directory. Default value: Now. +// - options - FileClientSetHTTPHeadersOptions contains the optional parameters for the FileClient.SetHTTPHeaders method. +// - ShareFileHTTPHeaders - ShareFileHTTPHeaders contains a group of parameters for the FileClient.Create method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) SetHTTPHeaders(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *FileClientSetHTTPHeadersOptions, shareFileHTTPHeaders *ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) (FileClientSetHTTPHeadersResponse, error) { + req, err := client.setHTTPHeadersCreateRequest(ctx, fileAttributes, fileCreationTime, fileLastWriteTime, options, shareFileHTTPHeaders, leaseAccessConditions) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + } + return client.setHTTPHeadersHandleResponse(resp) +} + +// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
+func (client *FileClient) setHTTPHeadersCreateRequest(ctx context.Context, fileAttributes string, fileCreationTime string, fileLastWriteTime string, options *FileClientSetHTTPHeadersOptions, shareFileHTTPHeaders *ShareFileHTTPHeaders, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.FileContentLength != nil { + req.Raw().Header["x-ms-content-length"] = []string{strconv.FormatInt(*options.FileContentLength, 10)} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentType != nil { + req.Raw().Header["x-ms-content-type"] = []string{*shareFileHTTPHeaders.ContentType} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentEncoding != nil { + req.Raw().Header["x-ms-content-encoding"] = []string{*shareFileHTTPHeaders.ContentEncoding} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentLanguage != nil { + req.Raw().Header["x-ms-content-language"] = []string{*shareFileHTTPHeaders.ContentLanguage} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.CacheControl != nil { + req.Raw().Header["x-ms-cache-control"] = []string{*shareFileHTTPHeaders.CacheControl} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentMD5 != nil { + req.Raw().Header["x-ms-content-md5"] = []string{base64.StdEncoding.EncodeToString(shareFileHTTPHeaders.ContentMD5)} + } + if shareFileHTTPHeaders != nil && shareFileHTTPHeaders.ContentDisposition != nil { + req.Raw().Header["x-ms-content-disposition"] = []string{*shareFileHTTPHeaders.ContentDisposition} + } + if options != nil && 
options.FilePermission != nil { + req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission} + } + if options != nil && options.FilePermissionKey != nil { + req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey} + } + req.Raw().Header["x-ms-file-attributes"] = []string{fileAttributes} + req.Raw().Header["x-ms-file-creation-time"] = []string{fileCreationTime} + req.Raw().Header["x-ms-file-last-write-time"] = []string{fileLastWriteTime} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. +func (client *FileClient) setHTTPHeadersHandleResponse(resp *http.Response) (FileClientSetHTTPHeadersResponse, error) { + result := FileClientSetHTTPHeadersResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-file-permission-key"); val != "" { + result.FilePermissionKey = &val + } 
+ if val := resp.Header.Get("x-ms-file-attributes"); val != "" { + result.FileAttributes = &val + } + if val := resp.Header.Get("x-ms-file-creation-time"); val != "" { + fileCreationTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + result.FileCreationTime = &fileCreationTime + } + if val := resp.Header.Get("x-ms-file-last-write-time"); val != "" { + fileLastWriteTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + result.FileLastWriteTime = &fileLastWriteTime + } + if val := resp.Header.Get("x-ms-file-change-time"); val != "" { + fileChangeTime, err := time.Parse(ISO8601, val) + if err != nil { + return FileClientSetHTTPHeadersResponse{}, err + } + result.FileChangeTime = &fileChangeTime + } + if val := resp.Header.Get("x-ms-file-id"); val != "" { + result.ID = &val + } + if val := resp.Header.Get("x-ms-file-parent-id"); val != "" { + result.ParentID = &val + } + return result, nil +} + +// SetMetadata - Updates user-defined metadata for the specified file. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - FileClientSetMetadataOptions contains the optional parameters for the FileClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *FileClient) SetMetadata(ctx context.Context, options *FileClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return FileClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *FileClient) setMetadataCreateRequest(ctx context.Context, options *FileClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. 
+func (client *FileClient) setMetadataHandleResponse(resp *http.Response) (FileClientSetMetadataResponse, error) { + result := FileClientSetMetadataResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + return result, nil +} + +// StartCopy - Copies a blob or file to a destination file within the storage account. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - copySource - Specifies the URL of the source file or blob, up to 2 KB in length. To copy a file to another file within +// the same storage account, you may use Shared Key to authenticate the source file. If you are +// copying a file from another storage account, or if you are copying a blob from the same storage account or another storage +// account, then you must authenticate the source file or blob using a shared +// access signature. If the source is a public blob, no authentication is required to perform the copy operation. A file in +// a share snapshot can also be specified as a copy source. 
+// - options - FileClientStartCopyOptions contains the optional parameters for the FileClient.StartCopy method. +// - CopyFileSMBInfo - CopyFileSMBInfo contains a group of parameters for the FileClient.StartCopy method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *FileClient) StartCopy(ctx context.Context, copySource string, options *FileClientStartCopyOptions, copyFileSMBInfo *CopyFileSMBInfo, leaseAccessConditions *LeaseAccessConditions) (FileClientStartCopyResponse, error) { + req, err := client.startCopyCreateRequest(ctx, copySource, options, copyFileSMBInfo, leaseAccessConditions) + if err != nil { + return FileClientStartCopyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientStartCopyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return FileClientStartCopyResponse{}, runtime.NewResponseError(resp) + } + return client.startCopyHandleResponse(resp) +} + +// startCopyCreateRequest creates the StartCopy request. 
+func (client *FileClient) startCopyCreateRequest(ctx context.Context, copySource string, options *FileClientStartCopyOptions, copyFileSMBInfo *CopyFileSMBInfo, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.FilePermission != nil { + req.Raw().Header["x-ms-file-permission"] = []string{*options.FilePermission} + } + if options != nil && options.FilePermissionKey != nil { + req.Raw().Header["x-ms-file-permission-key"] = []string{*options.FilePermissionKey} + } + if copyFileSMBInfo != nil && copyFileSMBInfo.FilePermissionCopyMode != nil { + req.Raw().Header["x-ms-file-permission-copy-mode"] = []string{string(*copyFileSMBInfo.FilePermissionCopyMode)} + } + if copyFileSMBInfo != nil && copyFileSMBInfo.IgnoreReadOnly != nil { + req.Raw().Header["x-ms-file-copy-ignore-readonly"] = []string{strconv.FormatBool(*copyFileSMBInfo.IgnoreReadOnly)} + } + if copyFileSMBInfo != nil && copyFileSMBInfo.FileAttributes != nil { + req.Raw().Header["x-ms-file-attributes"] = []string{*copyFileSMBInfo.FileAttributes} + } + if copyFileSMBInfo != nil && copyFileSMBInfo.FileCreationTime != nil { + req.Raw().Header["x-ms-file-creation-time"] = []string{*copyFileSMBInfo.FileCreationTime} + } + if copyFileSMBInfo != nil && copyFileSMBInfo.FileLastWriteTime != nil { + req.Raw().Header["x-ms-file-last-write-time"] = 
[]string{*copyFileSMBInfo.FileLastWriteTime} + } + if copyFileSMBInfo != nil && copyFileSMBInfo.SetArchiveAttribute != nil { + req.Raw().Header["x-ms-file-copy-set-archive"] = []string{strconv.FormatBool(*copyFileSMBInfo.SetArchiveAttribute)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// startCopyHandleResponse handles the StartCopy response. +func (client *FileClient) startCopyHandleResponse(resp *http.Response) (FileClientStartCopyResponse, error) { + result := FileClientStartCopyResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientStartCopyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientStartCopyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + return result, nil +} + +// UploadRange - Upload a range of bytes to a file. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - rangeParam - Specifies the range of bytes to be written. Both the start and end of the range must be specified. For an +// update operation, the range can be up to 4 MB in size. 
For a clear operation, the range can be +// up to the value of the file's full size. The File service accepts only a single byte range for the Range and 'x-ms-range' +// headers, and the byte range must be specified in the following format: +// bytes=startByte-endByte. +// - fileRangeWrite - Specify one of the following options: - Update: Writes the bytes specified by the request body into the +// specified range. The Range and Content-Length headers must match to perform the update. - Clear: +// Clears the specified range and releases the space used in storage for that range. To clear a range, set the Content-Length +// header to zero, and set the Range header to a value that indicates the range +// to clear, up to maximum file size. +// - contentLength - Specifies the number of bytes being transmitted in the request body. When the x-ms-write header is set +// to clear, the value of this header must be set to zero. +// - optionalbody - Initial data. +// - options - FileClientUploadRangeOptions contains the optional parameters for the FileClient.UploadRange method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *FileClient) UploadRange(ctx context.Context, rangeParam string, fileRangeWrite FileRangeWriteType, contentLength int64, optionalbody io.ReadSeekCloser, options *FileClientUploadRangeOptions, leaseAccessConditions *LeaseAccessConditions) (FileClientUploadRangeResponse, error) { + req, err := client.uploadRangeCreateRequest(ctx, rangeParam, fileRangeWrite, contentLength, optionalbody, options, leaseAccessConditions) + if err != nil { + return FileClientUploadRangeResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientUploadRangeResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return FileClientUploadRangeResponse{}, runtime.NewResponseError(resp) + } + return client.uploadRangeHandleResponse(resp) +} + +// uploadRangeCreateRequest creates the UploadRange request. +func (client *FileClient) uploadRangeCreateRequest(ctx context.Context, rangeParam string, fileRangeWrite FileRangeWriteType, contentLength int64, optionalbody io.ReadSeekCloser, options *FileClientUploadRangeOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "range") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-range"] = []string{rangeParam} + req.Raw().Header["x-ms-write"] = []string{string(fileRangeWrite)} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + if options != nil && options.ContentMD5 != nil { + req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.ContentMD5)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + 
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, req.SetBody(optionalbody, "application/octet-stream") +} + +// uploadRangeHandleResponse handles the UploadRange response. +func (client *FileClient) uploadRangeHandleResponse(resp *http.Response) (FileClientUploadRangeResponse, error) { + result := FileClientUploadRangeResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientUploadRangeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return FileClientUploadRangeResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientUploadRangeResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientUploadRangeResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// UploadRangeFromURL - Upload a range of bytes to a file where the contents are read from a URL. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - rangeParam - Writes data to the specified byte range in the file. +// - copySource - Specifies the URL of the source file or blob, up to 2 KB in length. 
To copy a file to another file within +// the same storage account, you may use Shared Key to authenticate the source file. If you are +// copying a file from another storage account, or if you are copying a blob from the same storage account or another storage +// account, then you must authenticate the source file or blob using a shared +// access signature. If the source is a public blob, no authentication is required to perform the copy operation. A file in +// a share snapshot can also be specified as a copy source. +// - contentLength - Specifies the number of bytes being transmitted in the request body. When the x-ms-write header is set +// to clear, the value of this header must be set to zero. +// - options - FileClientUploadRangeFromURLOptions contains the optional parameters for the FileClient.UploadRangeFromURL method. +// - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the FileClient.UploadRangeFromURL +// method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *FileClient) UploadRangeFromURL(ctx context.Context, rangeParam string, copySource string, contentLength int64, options *FileClientUploadRangeFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (FileClientUploadRangeFromURLResponse, error) { + req, err := client.uploadRangeFromURLCreateRequest(ctx, rangeParam, copySource, contentLength, options, sourceModifiedAccessConditions, leaseAccessConditions) + if err != nil { + return FileClientUploadRangeFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return FileClientUploadRangeFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return FileClientUploadRangeFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.uploadRangeFromURLHandleResponse(resp) +} + +// uploadRangeFromURLCreateRequest creates the UploadRangeFromURL request. +func (client *FileClient) uploadRangeFromURLCreateRequest(ctx context.Context, rangeParam string, copySource string, contentLength int64, options *FileClientUploadRangeFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "range") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-range"] = []string{rangeParam} + req.Raw().Header["x-ms-copy-source"] = []string{copySource} + if options != nil && options.SourceRange != nil { + req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange} + } + req.Raw().Header["x-ms-write"] = []string{"update"} + req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} + 
if options != nil && options.SourceContentCRC64 != nil { + req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentCRC64)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatchCRC64 != nil { + req.Raw().Header["x-ms-source-if-match-crc64"] = []string{base64.StdEncoding.EncodeToString(sourceModifiedAccessConditions.SourceIfMatchCRC64)} + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatchCRC64 != nil { + req.Raw().Header["x-ms-source-if-none-match-crc64"] = []string{base64.StdEncoding.EncodeToString(sourceModifiedAccessConditions.SourceIfNoneMatchCRC64)} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.CopySourceAuthorization != nil { + req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// uploadRangeFromURLHandleResponse handles the UploadRangeFromURL response. 
+func (client *FileClient) uploadRangeFromURLHandleResponse(resp *http.Response) (FileClientUploadRangeFromURLResponse, error) { + result := FileClientUploadRangeFromURLResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientUploadRangeFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return FileClientUploadRangeFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return FileClientUploadRangeFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return FileClientUploadRangeFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return FileClientUploadRangeFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + return result, nil +} diff --git a/sdk/storage/azfile/internal/generated/zz_models.go b/sdk/storage/azfile/internal/generated/zz_models.go new file mode 100644 index 000000000000..95443aea430f --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_models.go @@ -0,0 +1,932 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AccessPolicy - An Access policy. +type AccessPolicy struct { + // The date-time the policy expires. + Expiry *time.Time `xml:"Expiry"` + + // The permissions for the ACL policy. + Permission *string `xml:"Permission"` + + // The date-time the policy is active. + Start *time.Time `xml:"Start"` +} + +type ClearRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +// CopyFileSMBInfo contains a group of parameters for the FileClient.StartCopy method. +type CopyFileSMBInfo struct { + // Specifies either the option to copy file attributes from a source file(source) to a target file or a list of attributes + // to set on a target file. + FileAttributes *string + // Specifies either the option to copy file creation time from a source file(source) to a target file or a time value in ISO + // 8601 format to set as creation time on a target file. + FileCreationTime *string + // Specifies either the option to copy file last write time from a source file(source) to a target file or a time value in + // ISO 8601 format to set as last write time on a target file. + FileLastWriteTime *string + // Specifies the option to copy file security descriptor from source file or to set it using the value which is defined by + // the header value of x-ms-file-permission or x-ms-file-permission-key. + FilePermissionCopyMode *PermissionCopyModeType + // Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + IgnoreReadOnly *bool + // Specifies the option to set archive attribute on a target file. 
True means archive attribute will be set on a target file + // despite attribute overrides or a source file state. + SetArchiveAttribute *bool +} + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain. +type CORSRule struct { + // REQUIRED; The request headers that the origin domain may specify on the CORS request. + AllowedHeaders *string `xml:"AllowedHeaders"` + + // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) + AllowedMethods *string `xml:"AllowedMethods"` + + // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain + // is the domain from which the request originates. Note that the origin must be an exact + // case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*' + // to allow all origin domains to make requests via CORS. + AllowedOrigins *string `xml:"AllowedOrigins"` + + // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request + // issuer. + ExposedHeaders *string `xml:"ExposedHeaders"` + + // REQUIRED; The maximum amount of time that a browser should cache the preflight OPTIONS request. + MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` +} + +// Directory - A listed directory item. +type Directory struct { + // REQUIRED + Name *string `xml:"Name"` + Attributes *string `xml:"Attributes"` + ID *string `xml:"FileId"` + PermissionKey *string `xml:"PermissionKey"` + + // File properties. 
+ Properties *FileProperty `xml:"Properties"` +} + +// DirectoryClientCreateOptions contains the optional parameters for the DirectoryClient.Create method. +type DirectoryClientCreateOptions struct { + // If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + FilePermission *string + // Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key + // should be specified. + FilePermissionKey *string + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientDeleteOptions contains the optional parameters for the DirectoryClient.Delete method. +type DirectoryClientDeleteOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientForceCloseHandlesOptions contains the optional parameters for the DirectoryClient.ForceCloseHandles method. +type DirectoryClientForceCloseHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. 
+ // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files. + Recursive *bool + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientGetPropertiesOptions contains the optional parameters for the DirectoryClient.GetProperties method. +type DirectoryClientGetPropertiesOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientListFilesAndDirectoriesSegmentOptions contains the optional parameters for the DirectoryClient.NewListFilesAndDirectoriesSegmentPager +// method. +type DirectoryClientListFilesAndDirectoriesSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListFilesIncludeType + // Include extended information. + IncludeExtendedInfo *bool + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. 
+ // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + Maxresults *int32 + // Filters the results to return only entries whose name begins with the specified prefix. + Prefix *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientListHandlesOptions contains the optional parameters for the DirectoryClient.ListHandles method. +type DirectoryClientListHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + Maxresults *int32 + // Specifies operation should apply to the directory specified in the URI, its files, its subdirectories and their files. + Recursive *bool + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientSetMetadataOptions contains the optional parameters for the DirectoryClient.SetMetadata method. +type DirectoryClientSetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// DirectoryClientSetPropertiesOptions contains the optional parameters for the DirectoryClient.SetProperties method. +type DirectoryClientSetPropertiesOptions struct { + // If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + FilePermission *string + // Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key + // should be specified. + FilePermissionKey *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// File - A listed file item. +type File struct { + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; File properties. 
+ Properties *FileProperty `xml:"Properties"` + Attributes *string `xml:"Attributes"` + ID *string `xml:"FileId"` + PermissionKey *string `xml:"PermissionKey"` +} + +// FileClientAbortCopyOptions contains the optional parameters for the FileClient.AbortCopy method. +type FileClientAbortCopyOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientAcquireLeaseOptions contains the optional parameters for the FileClient.AcquireLease method. +type FileClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The File service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientBreakLeaseOptions contains the optional parameters for the FileClient.BreakLease method. +type FileClientBreakLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientChangeLeaseOptions contains the optional parameters for the FileClient.ChangeLease method. +type FileClientChangeLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The File service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientCreateOptions contains the optional parameters for the FileClient.Create method. +type FileClientCreateOptions struct { + // If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + FilePermission *string + // Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key + // should be specified. + FilePermissionKey *string + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientDeleteOptions contains the optional parameters for the FileClient.Delete method. +type FileClientDeleteOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientDownloadOptions contains the optional parameters for the FileClient.Download method. +type FileClientDownloadOptions struct { + // Return file data only from the specified byte range. + Range *string + // When this header is set to true and specified together with the Range header, the service returns the MD5 hash for the + // range, as long as the range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientForceCloseHandlesOptions contains the optional parameters for the FileClient.ForceCloseHandles method. +type FileClientForceCloseHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientGetPropertiesOptions contains the optional parameters for the FileClient.GetProperties method. +type FileClientGetPropertiesOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientGetRangeListOptions contains the optional parameters for the FileClient.GetRangeList method. +type FileClientGetRangeListOptions struct { + // The previous snapshot parameter is an opaque DateTime value that, when present, specifies the previous snapshot. + Prevsharesnapshot *string + // Specifies the range of bytes over which to list ranges, inclusively. + Range *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientListHandlesOptions contains the optional parameters for the FileClient.ListHandles method. +type FileClientListHandlesOptions struct { + // A string value that identifies the portion of the list to be returned with the next list operation. 
The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + Maxresults *int32 + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientReleaseLeaseOptions contains the optional parameters for the FileClient.ReleaseLease method. +type FileClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientSetHTTPHeadersOptions contains the optional parameters for the FileClient.SetHTTPHeaders method. +type FileClientSetHTTPHeadersOptions struct { + // Resizes a file to the specified size. If the specified byte value is less than the current size of the file, then all ranges + // above the specified byte value are cleared. + FileContentLength *int64 + // If specified the permission (security descriptor) shall be set for the directory/file. 
This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + FilePermission *string + // Key of the permission to be set for the directory/file. Note: Only one of the x-ms-file-permission or x-ms-file-permission-key + // should be specified. + FilePermissionKey *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientSetMetadataOptions contains the optional parameters for the FileClient.SetMetadata method. +type FileClientSetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientStartCopyOptions contains the optional parameters for the FileClient.StartCopy method. +type FileClientStartCopyOptions struct { + // If specified the permission (security descriptor) shall be set for the directory/file. This header can be used if Permission + // size is <= 8KB, else x-ms-file-permission-key header shall be used. Default + // value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission + // or x-ms-file-permission-key should be specified. + FilePermission *string + // Key of the permission to be set for the directory/file. 
Note: Only one of the x-ms-file-permission or x-ms-file-permission-key + // should be specified. + FilePermissionKey *string + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientUploadRangeFromURLOptions contains the optional parameters for the FileClient.UploadRangeFromURL method. +type FileClientUploadRangeFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileClientUploadRangeOptions contains the optional parameters for the FileClient.UploadRange method. +type FileClientUploadRangeOptions struct { + // An MD5 hash of the content. This hash is used to verify the integrity of the data during transport. When the Content-MD5 + // header is specified, the File service compares the hash of the content that has + // arrived with the header value that was sent. If the two hashes do not match, the operation will fail with error code 400 + // (Bad Request). + ContentMD5 []byte + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// FileProperty - File properties. +type FileProperty struct { + // REQUIRED; Content length of the file. This value may not be up-to-date since an SMB client may have modified the file locally. + // The value of Content-Length may not reflect that fact until the handle is closed or + // the op-lock is broken. To retrieve current property values, call Get File Properties. + ContentLength *int64 `xml:"Content-Length"` + ChangeTime *time.Time `xml:"ChangeTime"` + CreationTime *time.Time `xml:"CreationTime"` + ETag *azcore.ETag `xml:"Etag"` + LastAccessTime *time.Time `xml:"LastAccessTime"` + LastModified *time.Time `xml:"Last-Modified"` + LastWriteTime *time.Time `xml:"LastWriteTime"` +} + +// FileRange - An Azure Storage file range. +type FileRange struct { + // REQUIRED; End of the range. + End *int64 `xml:"End"` + + // REQUIRED; Start of the range. + Start *int64 `xml:"Start"` +} + +// FilesAndDirectoriesListSegment - Abstract for entries that can be listed from Directory. +type FilesAndDirectoriesListSegment struct { + // REQUIRED + Directories []*Directory `xml:"Directory"` + + // REQUIRED + Files []*File `xml:"File"` +} + +// Handle - A listed Azure Storage handle item. +type Handle struct { + // REQUIRED; Client IP that opened the handle + ClientIP *string `xml:"ClientIp"` + + // REQUIRED; FileId uniquely identifies the file or directory. + FileID *string `xml:"FileId"` + + // REQUIRED; XSMB service handle ID + ID *string `xml:"HandleId"` + + // REQUIRED; Time when the session that previously opened the handle has last been reconnected. 
(UTC) + OpenTime *time.Time `xml:"OpenTime"` + + // REQUIRED; File or directory name including full path starting from share root + Path *string `xml:"Path"` + + // REQUIRED; SMB session ID in context of which the file handle was opened + SessionID *string `xml:"SessionId"` + + // Time handle was last connected to (UTC) + LastReconnectTime *time.Time `xml:"LastReconnectTime"` + + // ParentId uniquely identifies the parent directory of the object. + ParentID *string `xml:"ParentId"` +} + +// LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ListFilesAndDirectoriesSegmentResponse - An enumeration of directories and files. +type ListFilesAndDirectoriesSegmentResponse struct { + // REQUIRED + DirectoryPath *string `xml:"DirectoryPath,attr"` + + // REQUIRED + NextMarker *string `xml:"NextMarker"` + + // REQUIRED + Prefix *string `xml:"Prefix"` + + // REQUIRED; Abstract for entries that can be listed from Directory. + Segment *FilesAndDirectoriesListSegment `xml:"Entries"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + ShareName *string `xml:"ShareName,attr"` + DirectoryID *string `xml:"DirectoryId"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + ShareSnapshot *string `xml:"ShareSnapshot,attr"` +} + +// ListHandlesResponse - An enumeration of handles. +type ListHandlesResponse struct { + // REQUIRED + NextMarker *string `xml:"NextMarker"` + Handles []*Handle `xml:"Entries>Handle"` +} + +// ListSharesResponse - An enumeration of shares. 
+type ListSharesResponse struct { + // REQUIRED + NextMarker *string `xml:"NextMarker"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + Prefix *string `xml:"Prefix"` + Shares []*Share `xml:"Shares>Share"` +} + +// Metrics - Storage Analytics metrics for file service. +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the File service. + Enabled *bool `xml:"Enabled"` + + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // The retention policy. + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` +} + +// ProtocolSettings - Protocol settings +type ProtocolSettings struct { + // Settings for SMB protocol. + Smb *SMBSettings `xml:"SMB"` +} + +// RetentionPolicy - The retention policy. +type RetentionPolicy struct { + // REQUIRED; Indicates whether a retention policy is enabled for the File service. If false, metrics data is retained, and + // the user is responsible for deleting it. + Enabled *bool `xml:"Enabled"` + + // Indicates the number of days that metrics data should be retained. All data older than this value will be deleted. Metrics + // data is deleted on a best-effort basis after the retention period expires. + Days *int32 `xml:"Days"` +} + +// SMBSettings - Settings for SMB protocol. +type SMBSettings struct { + // Settings for SMB Multichannel. + Multichannel *SMBMultichannel `xml:"Multichannel"` +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +type ServiceClientGetPropertiesOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ServiceClientListSharesSegmentOptions contains the optional parameters for the ServiceClient.NewListSharesSegmentPager +// method. +type ServiceClientListSharesSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListSharesIncludeType + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the response body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + Maxresults *int32 + // Filters the results to return only entries whose name begins with the specified prefix. + Prefix *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// Share - A listed Azure Storage share item. 
+type Share struct { + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a share. + Properties *ShareProperties `xml:"Properties"` + Deleted *bool `xml:"Deleted"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + Snapshot *string `xml:"Snapshot"` + Version *string `xml:"Version"` +} + +// ShareClientAcquireLeaseOptions contains the optional parameters for the ShareClient.AcquireLease method. +type ShareClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The File service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientBreakLeaseOptions contains the optional parameters for the ShareClient.BreakLease method. +type ShareClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. 
If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientChangeLeaseOptions contains the optional parameters for the ShareClient.ChangeLease method. +type ShareClientChangeLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The File service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientCreateOptions contains the optional parameters for the ShareClient.Create method. +type ShareClientCreateOptions struct { + // Specifies the access tier of the share. 
+ AccessTier *ShareAccessTier + // Protocols to enable on the share. + EnabledProtocols *string + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // Specifies the maximum size of the share, in gigabytes. + Quota *int32 + // Root squash to set on the share. Only valid for NFS shares. + RootSquash *ShareRootSquash + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientCreatePermissionOptions contains the optional parameters for the ShareClient.CreatePermission method. +type ShareClientCreatePermissionOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientCreateSnapshotOptions contains the optional parameters for the ShareClient.CreateSnapshot method. +type ShareClientCreateSnapshotOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientDeleteOptions contains the optional parameters for the ShareClient.Delete method. +type ShareClientDeleteOptions struct { + // Specifies the option include to delete the base share and all of its snapshots. + DeleteSnapshots *DeleteSnapshotsOptionType + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientGetAccessPolicyOptions contains the optional parameters for the ShareClient.GetAccessPolicy method. +type ShareClientGetAccessPolicyOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientGetPermissionOptions contains the optional parameters for the ShareClient.GetPermission method. +type ShareClientGetPermissionOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientGetPropertiesOptions contains the optional parameters for the ShareClient.GetProperties method. +type ShareClientGetPropertiesOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientGetStatisticsOptions contains the optional parameters for the ShareClient.GetStatistics method. +type ShareClientGetStatisticsOptions struct { + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientReleaseLeaseOptions contains the optional parameters for the ShareClient.ReleaseLease method. +type ShareClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientRenewLeaseOptions contains the optional parameters for the ShareClient.RenewLease method. +type ShareClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + Sharesnapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientRestoreOptions contains the optional parameters for the ShareClient.Restore method. +type ShareClientRestoreOptions struct { + // Specifies the name of the previously-deleted share. 
+ DeletedShareName *string + // Specifies the version of the previously-deleted share. + DeletedShareVersion *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientSetAccessPolicyOptions contains the optional parameters for the ShareClient.SetAccessPolicy method. +type ShareClientSetAccessPolicyOptions struct { + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientSetMetadataOptions contains the optional parameters for the ShareClient.SetMetadata method. +type ShareClientSetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareClientSetPropertiesOptions contains the optional parameters for the ShareClient.SetProperties method. +type ShareClientSetPropertiesOptions struct { + // Specifies the access tier of the share. + AccessTier *ShareAccessTier + // Specifies the maximum size of the share, in gigabytes. + Quota *int32 + // Root squash to set on the share. Only valid for NFS shares. + RootSquash *ShareRootSquash + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for File Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN] + Timeout *int32 +} + +// ShareFileHTTPHeaders contains a group of parameters for the FileClient.Create method. +type ShareFileHTTPHeaders struct { + // Sets the file's cache control. The File service stores this value but does not use or modify it. + CacheControl *string + // Sets the file's Content-Disposition header. + ContentDisposition *string + // Specifies which content encodings have been applied to the file. + ContentEncoding *string + // Specifies the natural languages used by this resource. + ContentLanguage *string + // Sets the file's MD5 hash. + ContentMD5 []byte + // Sets the MIME content type of the file. The default type is 'application/octet-stream'. + ContentType *string +} + +// ShareFileRangeList - The list of file ranges +type ShareFileRangeList struct { + ClearRanges []*ClearRange `xml:"ClearRange"` + Ranges []*FileRange `xml:"Range"` +} + +// SharePermission - A permission (a security descriptor) at the share level. +type SharePermission struct { + // REQUIRED; The permission in the Security Descriptor Definition Language (SDDL). + Permission *string `json:"permission,omitempty"` +} + +// ShareProperties - Properties of a share. +type ShareProperties struct { + // REQUIRED + ETag *azcore.ETag `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + + // REQUIRED + Quota *int32 `xml:"Quota"` + AccessTier *string `xml:"AccessTier"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + AccessTierTransitionState *string `xml:"AccessTierTransitionState"` + DeletedTime *time.Time `xml:"DeletedTime"` + EnabledProtocols *string `xml:"EnabledProtocols"` + + // When a share is leased, specifies whether the lease is of infinite or fixed duration. 
+ LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + + // Lease state of the share. + LeaseState *LeaseStateType `xml:"LeaseState"` + + // The current lease status of the share. + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + NextAllowedQuotaDowngradeTime *time.Time `xml:"NextAllowedQuotaDowngradeTime"` + ProvisionedEgressMBps *int32 `xml:"ProvisionedEgressMBps"` + ProvisionedIngressMBps *int32 `xml:"ProvisionedIngressMBps"` + ProvisionedIops *int32 `xml:"ProvisionedIops"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + RootSquash *ShareRootSquash `xml:"RootSquash"` +} + +// ShareStats - Stats for the share. +type ShareStats struct { + // REQUIRED; The approximate size of the data stored in bytes. Note that this value may not include all recently created or + // recently resized files. + ShareUsageBytes *int64 `xml:"ShareUsageBytes"` +} + +// SignedIdentifier - Signed identifier. +type SignedIdentifier struct { + // REQUIRED; A unique id. + ID *string `xml:"Id"` + + // The access policy. + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` +} + +// SMBMultichannel - Settings for SMB multichannel +type SMBMultichannel struct { + // If SMB multichannel is enabled. + Enabled *bool `xml:"Enabled"` +} + +// SourceModifiedAccessConditions contains a group of parameters for the FileClient.UploadRangeFromURL method. +type SourceModifiedAccessConditions struct { + // Specify the crc64 value to operate only on range with a matching crc64 checksum. + SourceIfMatchCRC64 []byte + // Specify the crc64 value to operate only on range without a matching crc64 checksum. + SourceIfNoneMatchCRC64 []byte +} + +type StorageError struct { + Message *string `json:"Message,omitempty"` +} + +// StorageServiceProperties - Storage service properties. +type StorageServiceProperties struct { + // The set of CORS rules. + CORS []*CORSRule `xml:"Cors>CorsRule"` + + // A summary of request statistics grouped by API in hourly aggregates for files. 
+ HourMetrics *Metrics `xml:"HourMetrics"` + + // A summary of request statistics grouped by API in minute aggregates for files. + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + + // Protocol settings + Protocol *ProtocolSettings `xml:"ProtocolSettings"` +} diff --git a/sdk/storage/azfile/internal/generated/zz_models_serde.go b/sdk/storage/azfile/internal/generated/zz_models_serde.go new file mode 100644 index 000000000000..7f837baac65c --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_models_serde.go @@ -0,0 +1,344 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "encoding/json" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "reflect" + "time" +) + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*timeRFC3339)(a.Expiry), + Start: (*timeRFC3339)(a.Start), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. 
+func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FileProperty. +func (f FileProperty) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias FileProperty + aux := &struct { + *alias + ChangeTime *timeRFC3339 `xml:"ChangeTime"` + CreationTime *timeRFC3339 `xml:"CreationTime"` + LastAccessTime *timeRFC3339 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + LastWriteTime *timeRFC3339 `xml:"LastWriteTime"` + }{ + alias: (*alias)(&f), + ChangeTime: (*timeRFC3339)(f.ChangeTime), + CreationTime: (*timeRFC3339)(f.CreationTime), + LastAccessTime: (*timeRFC3339)(f.LastAccessTime), + LastModified: (*timeRFC1123)(f.LastModified), + LastWriteTime: (*timeRFC3339)(f.LastWriteTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type FileProperty. 
+func (f *FileProperty) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias FileProperty + aux := &struct { + *alias + ChangeTime *timeRFC3339 `xml:"ChangeTime"` + CreationTime *timeRFC3339 `xml:"CreationTime"` + LastAccessTime *timeRFC3339 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + LastWriteTime *timeRFC3339 `xml:"LastWriteTime"` + }{ + alias: (*alias)(f), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + f.ChangeTime = (*time.Time)(aux.ChangeTime) + f.CreationTime = (*time.Time)(aux.CreationTime) + f.LastAccessTime = (*time.Time)(aux.LastAccessTime) + f.LastModified = (*time.Time)(aux.LastModified) + f.LastWriteTime = (*time.Time)(aux.LastWriteTime) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type FilesAndDirectoriesListSegment. +func (f FilesAndDirectoriesListSegment) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias FilesAndDirectoriesListSegment + aux := &struct { + *alias + Directories *[]*Directory `xml:"Directory"` + Files *[]*File `xml:"File"` + }{ + alias: (*alias)(&f), + } + if f.Directories != nil { + aux.Directories = &f.Directories + } + if f.Files != nil { + aux.Files = &f.Files + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type Handle. +func (h Handle) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias Handle + aux := &struct { + *alias + LastReconnectTime *timeRFC1123 `xml:"LastReconnectTime"` + OpenTime *timeRFC1123 `xml:"OpenTime"` + }{ + alias: (*alias)(&h), + LastReconnectTime: (*timeRFC1123)(h.LastReconnectTime), + OpenTime: (*timeRFC1123)(h.OpenTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type Handle. 
+func (h *Handle) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias Handle + aux := &struct { + *alias + LastReconnectTime *timeRFC1123 `xml:"LastReconnectTime"` + OpenTime *timeRFC1123 `xml:"OpenTime"` + }{ + alias: (*alias)(h), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + h.LastReconnectTime = (*time.Time)(aux.LastReconnectTime) + h.OpenTime = (*time.Time)(aux.OpenTime) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ListHandlesResponse. +func (l ListHandlesResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ListHandlesResponse + aux := &struct { + *alias + Handles *[]*Handle `xml:"Entries>Handle"` + }{ + alias: (*alias)(&l), + } + if l.Handles != nil { + aux.Handles = &l.Handles + } + return enc.EncodeElement(aux, start) +} + +// MarshalXML implements the xml.Marshaller interface for type ListSharesResponse. +func (l ListSharesResponse) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ListSharesResponse + aux := &struct { + *alias + Shares *[]*Share `xml:"Shares>Share"` + }{ + alias: (*alias)(&l), + } + if l.Shares != nil { + aux.Shares = &l.Shares + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type Share. +func (s *Share) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias Share + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + }{ + alias: (*alias)(s), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + s.Metadata = (map[string]*string)(aux.Metadata) + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ShareFileRangeList. 
+func (s ShareFileRangeList) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ShareFileRangeList + aux := &struct { + *alias + ClearRanges *[]*ClearRange `xml:"ClearRange"` + Ranges *[]*FileRange `xml:"Range"` + }{ + alias: (*alias)(&s), + } + if s.ClearRanges != nil { + aux.ClearRanges = &s.ClearRanges + } + if s.Ranges != nil { + aux.Ranges = &s.Ranges + } + return enc.EncodeElement(aux, start) +} + +// MarshalJSON implements the json.Marshaller interface for type SharePermission. +func (s SharePermission) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "permission", s.Permission) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type SharePermission. +func (s *SharePermission) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "permission": + err = unpopulate(val, "Permission", &s.Permission) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type ShareProperties. 
+func (s ShareProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias ShareProperties + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + NextAllowedQuotaDowngradeTime *timeRFC1123 `xml:"NextAllowedQuotaDowngradeTime"` + }{ + alias: (*alias)(&s), + AccessTierChangeTime: (*timeRFC1123)(s.AccessTierChangeTime), + DeletedTime: (*timeRFC1123)(s.DeletedTime), + LastModified: (*timeRFC1123)(s.LastModified), + NextAllowedQuotaDowngradeTime: (*timeRFC1123)(s.NextAllowedQuotaDowngradeTime), + } + return enc.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ShareProperties. +func (s *ShareProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias ShareProperties + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + NextAllowedQuotaDowngradeTime *timeRFC1123 `xml:"NextAllowedQuotaDowngradeTime"` + }{ + alias: (*alias)(s), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + s.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + s.DeletedTime = (*time.Time)(aux.DeletedTime) + s.LastModified = (*time.Time)(aux.LastModified) + s.NextAllowedQuotaDowngradeTime = (*time.Time)(aux.NextAllowedQuotaDowngradeTime) + return nil +} + +// MarshalJSON implements the json.Marshaller interface for type StorageError. +func (s StorageError) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]any) + populate(objectMap, "Message", s.Message) + return json.Marshal(objectMap) +} + +// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError. 
+func (s *StorageError) UnmarshalJSON(data []byte) error { + var rawMsg map[string]json.RawMessage + if err := json.Unmarshal(data, &rawMsg); err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + for key, val := range rawMsg { + var err error + switch key { + case "Message": + err = unpopulate(val, "Message", &s.Message) + delete(rawMsg, key) + } + if err != nil { + return fmt.Errorf("unmarshalling type %T: %v", s, err) + } + } + return nil +} + +// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. +func (s StorageServiceProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + type alias StorageServiceProperties + aux := &struct { + *alias + CORS *[]*CORSRule `xml:"Cors>CorsRule"` + }{ + alias: (*alias)(&s), + } + if s.CORS != nil { + aux.CORS = &s.CORS + } + return enc.EncodeElement(aux, start) +} + +func populate(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else if !reflect.ValueOf(v).IsNil() { + m[k] = v + } +} + +func unpopulate(data json.RawMessage, fn string, v any) error { + if data == nil { + return nil + } + if err := json.Unmarshal(data, v); err != nil { + return fmt.Errorf("struct field %s: %v", fn, err) + } + return nil +} diff --git a/sdk/storage/azfile/internal/generated/zz_response_types.go b/sdk/storage/azfile/internal/generated/zz_response_types.go new file mode 100644 index 000000000000..be6bf1f60562 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_response_types.go @@ -0,0 +1,1189 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "io" + "time" +) + +// DirectoryClientCreateResponse contains the response from method DirectoryClient.Create. +type DirectoryClientCreateResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientDeleteResponse contains the response from method DirectoryClient.Delete. 
+type DirectoryClientDeleteResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientForceCloseHandlesResponse contains the response from method DirectoryClient.ForceCloseHandles. +type DirectoryClientForceCloseHandlesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // Marker contains the information returned from the x-ms-marker header response. + Marker *string + + // NumberOfHandlesClosed contains the information returned from the x-ms-number-of-handles-closed header response. + NumberOfHandlesClosed *int32 + + // NumberOfHandlesFailedToClose contains the information returned from the x-ms-number-of-handles-failed header response. + NumberOfHandlesFailedToClose *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientGetPropertiesResponse contains the response from method DirectoryClient.GetProperties. +type DirectoryClientGetPropertiesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. 
+ FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientListFilesAndDirectoriesSegmentResponse contains the response from method DirectoryClient.NewListFilesAndDirectoriesSegmentPager. +type DirectoryClientListFilesAndDirectoriesSegmentResponse struct { + ListFilesAndDirectoriesSegmentResponse + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// DirectoryClientListHandlesResponse contains the response from method DirectoryClient.ListHandles. 
+type DirectoryClientListHandlesResponse struct { + ListHandlesResponse + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// DirectoryClientSetMetadataResponse contains the response from method DirectoryClient.SetMetadata. +type DirectoryClientSetMetadataResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// DirectoryClientSetPropertiesResponse contains the response from method DirectoryClient.SetProperties. +type DirectoryClientSetPropertiesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. 
+ FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientAbortCopyResponse contains the response from method FileClient.AbortCopy. +type FileClientAbortCopyResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientAcquireLeaseResponse contains the response from method FileClient.AcquireLease. +type FileClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientBreakLeaseResponse contains the response from method FileClient.BreakLease. +type FileClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientChangeLeaseResponse contains the response from method FileClient.ChangeLease. +type FileClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientCreateResponse contains the response from method FileClient.Create. +type FileClientCreateResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientDeleteResponse contains the response from method FileClient.Delete. +type FileClientDeleteResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientDownloadResponse contains the response from method FileClient.Download. +type FileClientDownloadResponse struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // Body contains the streaming response. + Body io.ReadCloser + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. 
+ CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileContentMD5 contains the information returned from the x-ms-content-md5 header response. + FileContentMD5 []byte + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. 
+ IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientForceCloseHandlesResponse contains the response from method FileClient.ForceCloseHandles. +type FileClientForceCloseHandlesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // Marker contains the information returned from the x-ms-marker header response. + Marker *string + + // NumberOfHandlesClosed contains the information returned from the x-ms-number-of-handles-closed header response. + NumberOfHandlesClosed *int32 + + // NumberOfHandlesFailedToClose contains the information returned from the x-ms-number-of-handles-failed header response. + NumberOfHandlesFailedToClose *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientGetPropertiesResponse contains the response from method FileClient.GetProperties. +type FileClientGetPropertiesResponse struct { + // CacheControl contains the information returned from the Cache-Control header response. 
+ CacheControl *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. 
+ FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // FileType contains the information returned from the x-ms-type header response. + FileType *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientGetRangeListResponse contains the response from method FileClient.GetRangeList. +type FileClientGetRangeListResponse struct { + ShareFileRangeList + // Date contains the information returned from the Date header response. 
+ Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // FileContentLength contains the information returned from the x-ms-content-length header response. + FileContentLength *int64 `xml:"FileContentLength"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// FileClientListHandlesResponse contains the response from method FileClient.ListHandles. +type FileClientListHandlesResponse struct { + ListHandlesResponse + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// FileClientReleaseLeaseResponse contains the response from method FileClient.ReleaseLease. +type FileClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientSetHTTPHeadersResponse contains the response from method FileClient.SetHTTPHeaders. +type FileClientSetHTTPHeadersResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // FileAttributes contains the information returned from the x-ms-file-attributes header response. + FileAttributes *string + + // FileChangeTime contains the information returned from the x-ms-file-change-time header response. + FileChangeTime *time.Time + + // FileCreationTime contains the information returned from the x-ms-file-creation-time header response. + FileCreationTime *time.Time + + // ID contains the information returned from the x-ms-file-id header response. + ID *string + + // FileLastWriteTime contains the information returned from the x-ms-file-last-write-time header response. + FileLastWriteTime *time.Time + + // ParentID contains the information returned from the x-ms-file-parent-id header response. + ParentID *string + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string +} + +// FileClientSetMetadataResponse contains the response from method FileClient.SetMetadata. +type FileClientSetMetadataResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientStartCopyResponse contains the response from method FileClient.StartCopy. +type FileClientStartCopyResponse struct { + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// FileClientUploadRangeFromURLResponse contains the response from method FileClient.UploadRangeFromURL. +type FileClientUploadRangeFromURLResponse struct { + // ContentMD5 contains the information returned from the Content-MD5 header response. 
+ ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// FileClientUploadRangeResponse contains the response from method FileClient.UploadRange. +type FileClientUploadRangeResponse struct { + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. 
+type ServiceClientGetPropertiesResponse struct { + StorageServiceProperties + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientListSharesSegmentResponse contains the response from method ServiceClient.NewListSharesSegmentPager. +type ServiceClientListSharesSegmentResponse struct { + ListSharesResponse + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. +type ServiceClientSetPropertiesResponse struct { + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientAcquireLeaseResponse contains the response from method ShareClient.AcquireLease. +type ShareClientAcquireLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientBreakLeaseResponse contains the response from method ShareClient.BreakLease. +type ShareClientBreakLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientChangeLeaseResponse contains the response from method ShareClient.ChangeLease. +type ShareClientChangeLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientCreatePermissionResponse contains the response from method ShareClient.CreatePermission. +type ShareClientCreatePermissionResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // FilePermissionKey contains the information returned from the x-ms-file-permission-key header response. + FilePermissionKey *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientCreateResponse contains the response from method ShareClient.Create. +type ShareClientCreateResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientCreateSnapshotResponse contains the response from method ShareClient.CreateSnapshot. +type ShareClientCreateSnapshotResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Snapshot contains the information returned from the x-ms-snapshot header response. + Snapshot *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientDeleteResponse contains the response from method ShareClient.Delete. +type ShareClientDeleteResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientGetAccessPolicyResponse contains the response from method ShareClient.GetAccessPolicy. +type ShareClientGetAccessPolicyResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // A collection of signed identifiers. + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ShareClientGetPermissionResponse contains the response from method ShareClient.GetPermission. +type ShareClientGetPermissionResponse struct { + SharePermission + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientGetPropertiesResponse contains the response from method ShareClient.GetProperties. +type ShareClientGetPropertiesResponse struct { + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierTransitionState contains the information returned from the x-ms-access-tier-transition-state header response. + AccessTierTransitionState *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // EnabledProtocols contains the information returned from the x-ms-enabled-protocols header response. + EnabledProtocols *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]*string + + // NextAllowedQuotaDowngradeTime contains the information returned from the x-ms-share-next-allowed-quota-downgrade-time header + // response. + NextAllowedQuotaDowngradeTime *time.Time + + // ProvisionedEgressMBps contains the information returned from the x-ms-share-provisioned-egress-mbps header response. 
+ ProvisionedEgressMBps *int32 + + // ProvisionedIngressMBps contains the information returned from the x-ms-share-provisioned-ingress-mbps header response. + ProvisionedIngressMBps *int32 + + // ProvisionedIops contains the information returned from the x-ms-share-provisioned-iops header response. + ProvisionedIops *int32 + + // Quota contains the information returned from the x-ms-share-quota header response. + Quota *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // RootSquash contains the information returned from the x-ms-root-squash header response. + RootSquash *ShareRootSquash + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientGetStatisticsResponse contains the response from method ShareClient.GetStatistics. +type ShareClientGetStatisticsResponse struct { + ShareStats + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// ShareClientReleaseLeaseResponse contains the response from method ShareClient.ReleaseLease. +type ShareClientReleaseLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientRenewLeaseResponse contains the response from method ShareClient.RenewLease. +type ShareClientRenewLeaseResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientRestoreResponse contains the response from method ShareClient.Restore. +type ShareClientRestoreResponse struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientSetAccessPolicyResponse contains the response from method ShareClient.SetAccessPolicy. +type ShareClientSetAccessPolicyResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientSetMetadataResponse contains the response from method ShareClient.SetMetadata. +type ShareClientSetMetadataResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// ShareClientSetPropertiesResponse contains the response from method ShareClient.SetProperties. +type ShareClientSetPropertiesResponse struct { + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *azcore.ETag + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} diff --git a/sdk/storage/azfile/internal/generated/zz_service_client.go b/sdk/storage/azfile/internal/generated/zz_service_client.go new file mode 100644 index 000000000000..efd5f4708912 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_service_client.go @@ -0,0 +1,195 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "strings" +) + +// ServiceClient contains the methods for the Service group. +// Don't use this type directly, use NewServiceClient() instead. +type ServiceClient struct { + endpoint string + pl runtime.Pipeline +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, share, directory or file that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { + client := &ServiceClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// GetProperties - Gets the properties of a storage account's File service, including properties for Storage Analytics metrics +// and CORS (Cross-Origin Resource Sharing) rules. 
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) {
	req, err := client.getPropertiesCreateRequest(ctx, options)
	if err != nil {
		return ServiceClientGetPropertiesResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return ServiceClientGetPropertiesResponse{}, err
	}
	// Any status other than 200 OK is surfaced to the caller as a response error.
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
	}
	return client.getPropertiesHandleResponse(resp)
}

// getPropertiesCreateRequest creates the GetProperties request.
func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	// restype=service&comp=properties addresses the account's File service properties resource.
	reqQP.Set("restype", "service")
	reqQP.Set("comp", "properties")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// getPropertiesHandleResponse handles the GetProperties response.
func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) {
	result := ServiceClientGetPropertiesResponse{}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	// The XML body is decoded into the embedded StorageServiceProperties.
	if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil {
		return ServiceClientGetPropertiesResponse{}, err
	}
	return result, nil
}

// NewListSharesSegmentPager - The List Shares Segment operation returns a list of the shares and share snapshots under the
// specified account.
//
// Generated from API version 2020-10-02
// - options - ServiceClientListSharesSegmentOptions contains the optional parameters for the ServiceClient.NewListSharesSegmentPager
// method.
//
// ListSharesSegmentCreateRequest creates the ListSharesSegment request.
func (client *ServiceClient) ListSharesSegmentCreateRequest(ctx context.Context, options *ServiceClientListSharesSegmentOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "list")
	if options != nil && options.Prefix != nil {
		reqQP.Set("prefix", *options.Prefix)
	}
	if options != nil && options.Marker != nil {
		reqQP.Set("marker", *options.Marker)
	}
	if options != nil && options.Maxresults != nil {
		reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
	}
	if options != nil && options.Include != nil {
		// Render the Include slice as a comma-separated list (fmt prints it as "[a b c]").
		reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
	}
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// ListSharesSegmentHandleResponse handles the ListSharesSegment response.
func (client *ServiceClient) ListSharesSegmentHandleResponse(resp *http.Response) (ServiceClientListSharesSegmentResponse, error) {
	result := ServiceClientListSharesSegmentResponse{}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if err := runtime.UnmarshalAsXML(resp, &result.ListSharesResponse); err != nil {
		return ServiceClientListSharesSegmentResponse{}, err
	}
	return result, nil
}

// SetProperties - Sets properties for a storage account's File service endpoint, including properties for Storage Analytics
// metrics and CORS (Cross-Origin Resource Sharing) rules.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - storageServiceProperties - The StorageService properties.
// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) {
	req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options)
	if err != nil {
		return ServiceClientSetPropertiesResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return ServiceClientSetPropertiesResponse{}, err
	}
	// The service acknowledges SetProperties with 202 Accepted.
	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
		return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp)
	}
	return client.setPropertiesHandleResponse(resp)
}

// setPropertiesCreateRequest creates the SetProperties request.
func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("restype", "service")
	reqQP.Set("comp", "properties")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// MarshalAsXML writes the properties into the request body; its error (if any)
	// is returned alongside the (pointer) request value.
	return req, runtime.MarshalAsXML(req, storageServiceProperties)
}

// setPropertiesHandleResponse handles the SetProperties response.
func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) {
	result := ServiceClientSetPropertiesResponse{}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	return result, nil
}
diff --git a/sdk/storage/azfile/internal/generated/zz_share_client.go b/sdk/storage/azfile/internal/generated/zz_share_client.go
new file mode 100644
index 000000000000..1ba2fda44963
--- /dev/null
+++ b/sdk/storage/azfile/internal/generated/zz_share_client.go
@@ -0,0 +1,1437 @@
//go:build go1.18
// +build go1.18

// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.

package generated

// NOTE(review): import paths below read "github.com", which looks like an
// archive/link-mangling of "github.com" — verify before applying this patch.
import (
	"context"
	"encoding/xml"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// ShareClient contains the methods for the Share group.
// Don't use this type directly, use NewShareClient() instead.
type ShareClient struct {
	endpoint string           // URL of the target service account, share, directory or file
	pl       runtime.Pipeline // pipeline used for sending requests and handling responses
}

// NewShareClient creates a new instance of ShareClient with the specified values.
// - endpoint - The URL of the service account, share, directory or file that is the target of the desired operation.
// - pl - the pipeline used for sending requests and handling responses.
func NewShareClient(endpoint string, pl runtime.Pipeline) *ShareClient {
	client := &ShareClient{
		endpoint: endpoint,
		pl:       pl,
	}
	return client
}

// AcquireLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and
// delete share operations.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
// lease can be between 15 and 60 seconds. A lease duration cannot be changed using
// renew or change.
// - options - ShareClientAcquireLeaseOptions contains the optional parameters for the ShareClient.AcquireLease method.
func (client *ShareClient) AcquireLease(ctx context.Context, duration int32, options *ShareClientAcquireLeaseOptions) (ShareClientAcquireLeaseResponse, error) {
	req, err := client.acquireLeaseCreateRequest(ctx, duration, options)
	if err != nil {
		return ShareClientAcquireLeaseResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return ShareClientAcquireLeaseResponse{}, err
	}
	// A newly acquired lease is reported with 201 Created.
	if !runtime.HasStatusCode(resp, http.StatusCreated) {
		return ShareClientAcquireLeaseResponse{}, runtime.NewResponseError(resp)
	}
	return client.acquireLeaseHandleResponse(resp)
}

// acquireLeaseCreateRequest creates the AcquireLease request.
func (client *ShareClient) acquireLeaseCreateRequest(ctx context.Context, duration int32, options *ShareClientAcquireLeaseOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "lease")
	reqQP.Set("restype", "share")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	if options != nil && options.Sharesnapshot != nil {
		reqQP.Set("sharesnapshot", *options.Sharesnapshot)
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-lease-action"] = []string{"acquire"}
	req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)}
	if options != nil && options.ProposedLeaseID != nil {
		req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// acquireLeaseHandleResponse handles the AcquireLease response.
func (client *ShareClient) acquireLeaseHandleResponse(resp *http.Response) (ShareClientAcquireLeaseResponse, error) {
	// Each field is populated only when the corresponding header is present;
	// absent headers leave the field nil.
	result := ShareClientAcquireLeaseResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ShareClientAcquireLeaseResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
		result.LeaseID = &val
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return ShareClientAcquireLeaseResponse{}, err
		}
		result.Date = &date
	}
	return result, nil
}

// BreakLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and
// delete share operations.
// If the operation fails it returns an *azcore.ResponseError type.
//
// Generated from API version 2020-10-02
// - options - ShareClientBreakLeaseOptions contains the optional parameters for the ShareClient.BreakLease method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
+func (client *ShareClient) BreakLease(ctx context.Context, options *ShareClientBreakLeaseOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return ShareClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. +func (client *ShareClient) breakLeaseCreateRequest(ctx context.Context, options *ShareClientBreakLeaseOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "share") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Sharesnapshot != nil { + reqQP.Set("sharesnapshot", *options.Sharesnapshot) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-lease-action"] = []string{"break"} + if options != nil && options.BreakPeriod != nil { + req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil 
+} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *ShareClient) breakLeaseHandleResponse(resp *http.Response) (ShareClientBreakLeaseResponse, error) { + result := ShareClientBreakLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and +// delete share operations. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - leaseID - Specifies the current lease ID on the resource. +// - options - ShareClientChangeLeaseOptions contains the optional parameters for the ShareClient.ChangeLease method. 
func (client *ShareClient) ChangeLease(ctx context.Context, leaseID string, options *ShareClientChangeLeaseOptions) (ShareClientChangeLeaseResponse, error) {
	req, err := client.changeLeaseCreateRequest(ctx, leaseID, options)
	if err != nil {
		return ShareClientChangeLeaseResponse{}, err
	}
	resp, err := client.pl.Do(req)
	if err != nil {
		return ShareClientChangeLeaseResponse{}, err
	}
	if !runtime.HasStatusCode(resp, http.StatusOK) {
		return ShareClientChangeLeaseResponse{}, runtime.NewResponseError(resp)
	}
	return client.changeLeaseHandleResponse(resp)
}

// changeLeaseCreateRequest creates the ChangeLease request.
func (client *ShareClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, options *ShareClientChangeLeaseOptions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "lease")
	reqQP.Set("restype", "share")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	if options != nil && options.Sharesnapshot != nil {
		reqQP.Set("sharesnapshot", *options.Sharesnapshot)
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-lease-action"] = []string{"change"}
	// The current lease ID is required; the proposed replacement ID is optional.
	req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
	if options != nil && options.ProposedLeaseID != nil {
		req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
	}
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}

// changeLeaseHandleResponse handles the ChangeLease response.
+func (client *ShareClient) changeLeaseHandleResponse(resp *http.Response) (ShareClientChangeLeaseResponse, error) {
+	result := ShareClientChangeLeaseResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientChangeLeaseResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
+		result.LeaseID = &val
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val) // RFC1123 is the HTTP date format used by the service
+		if err != nil {
+			return ShareClientChangeLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// Create - Creates a new share under the specified account. If the share with the same name already exists, the operation
+// fails.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - options - ShareClientCreateOptions contains the optional parameters for the ShareClient.Create method.
+func (client *ShareClient) Create(ctx context.Context, options *ShareClientCreateOptions) (ShareClientCreateResponse, error) {
+	req, err := client.createCreateRequest(ctx, options)
+	if err != nil {
+		return ShareClientCreateResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientCreateResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusCreated) {
+		return ShareClientCreateResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.createHandleResponse(resp)
+}
+
+// createCreateRequest creates the Create request.
+func (client *ShareClient) createCreateRequest(ctx context.Context, options *ShareClientCreateOptions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "share")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if options != nil && options.Metadata != nil {
+		for k, v := range options.Metadata {
+			if v != nil { // nil metadata values are skipped rather than sent empty
+				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+			}
+		}
+	}
+	if options != nil && options.Quota != nil {
+		req.Raw().Header["x-ms-share-quota"] = []string{strconv.FormatInt(int64(*options.Quota), 10)}
+	}
+	if options != nil && options.AccessTier != nil {
+		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.AccessTier)}
+	}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.EnabledProtocols != nil {
+		req.Raw().Header["x-ms-enabled-protocols"] = []string{*options.EnabledProtocols}
+	}
+	if options != nil && options.RootSquash != nil {
+		req.Raw().Header["x-ms-root-squash"] = []string{string(*options.RootSquash)}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// createHandleResponse handles the Create response.
+func (client *ShareClient) createHandleResponse(resp *http.Response) (ShareClientCreateResponse, error) {
+	result := ShareClientCreateResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientCreateResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientCreateResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// CreatePermission - Create a permission (a security descriptor).
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - sharePermission - A permission (a security descriptor) at the share level.
+// - options - ShareClientCreatePermissionOptions contains the optional parameters for the ShareClient.CreatePermission method.
+func (client *ShareClient) CreatePermission(ctx context.Context, sharePermission SharePermission, options *ShareClientCreatePermissionOptions) (ShareClientCreatePermissionResponse, error) {
+	req, err := client.createPermissionCreateRequest(ctx, sharePermission, options)
+	if err != nil {
+		return ShareClientCreatePermissionResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientCreatePermissionResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusCreated) {
+		return ShareClientCreatePermissionResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.createPermissionHandleResponse(resp)
+}
+
+// createPermissionCreateRequest creates the CreatePermission request.
+func (client *ShareClient) createPermissionCreateRequest(ctx context.Context, sharePermission SharePermission, options *ShareClientCreatePermissionOptions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "share")
+	reqQP.Set("comp", "filepermission")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, runtime.MarshalAsJSON(req, sharePermission) // body is JSON even though responses are XML
+}
+
+// createPermissionHandleResponse handles the CreatePermission response.
+func (client *ShareClient) createPermissionHandleResponse(resp *http.Response) (ShareClientCreatePermissionResponse, error) {
+	result := ShareClientCreatePermissionResponse{}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientCreatePermissionResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-file-permission-key"); val != "" {
+		result.FilePermissionKey = &val
+	}
+	return result, nil
+}
+
+// CreateSnapshot - Creates a read-only snapshot of a share.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - options - ShareClientCreateSnapshotOptions contains the optional parameters for the ShareClient.CreateSnapshot method.
+func (client *ShareClient) CreateSnapshot(ctx context.Context, options *ShareClientCreateSnapshotOptions) (ShareClientCreateSnapshotResponse, error) {
+	req, err := client.createSnapshotCreateRequest(ctx, options)
+	if err != nil {
+		return ShareClientCreateSnapshotResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientCreateSnapshotResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusCreated) {
+		return ShareClientCreateSnapshotResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.createSnapshotHandleResponse(resp)
+}
+
+// createSnapshotCreateRequest creates the CreateSnapshot request.
+func (client *ShareClient) createSnapshotCreateRequest(ctx context.Context, options *ShareClientCreateSnapshotOptions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "share")
+	reqQP.Set("comp", "snapshot")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	if options != nil && options.Metadata != nil {
+		for k, v := range options.Metadata {
+			if v != nil {
+				req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+			}
+		}
+	}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// createSnapshotHandleResponse handles the CreateSnapshot response.
+func (client *ShareClient) createSnapshotHandleResponse(resp *http.Response) (ShareClientCreateSnapshotResponse, error) {
+	result := ShareClientCreateSnapshotResponse{}
+	if val := resp.Header.Get("x-ms-snapshot"); val != "" {
+		result.Snapshot = &val // opaque DateTime token identifying the new snapshot
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientCreateSnapshotResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientCreateSnapshotResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// Delete - Operation marks the specified share or share snapshot for deletion. The share or share snapshot and any files
+// contained within it are later deleted during garbage collection.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - options - ShareClientDeleteOptions contains the optional parameters for the ShareClient.Delete method.
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
+func (client *ShareClient) Delete(ctx context.Context, options *ShareClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientDeleteResponse, error) {
+	req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions)
+	if err != nil {
+		return ShareClientDeleteResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientDeleteResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusAccepted) { // delete is async; service replies 202
+		return ShareClientDeleteResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.deleteHandleResponse(resp)
+}
+
+// deleteCreateRequest creates the Delete request.
+func (client *ShareClient) deleteCreateRequest(ctx context.Context, options *ShareClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "share")
+	if options != nil && options.Sharesnapshot != nil {
+		reqQP.Set("sharesnapshot", *options.Sharesnapshot)
+	}
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.DeleteSnapshots != nil {
+		req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)}
+	}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// deleteHandleResponse handles the Delete response.
+func (client *ShareClient) deleteHandleResponse(resp *http.Response) (ShareClientDeleteResponse, error) {
+	result := ShareClientDeleteResponse{}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientDeleteResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// GetAccessPolicy - Returns information about stored access policies specified on the share.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - options - ShareClientGetAccessPolicyOptions contains the optional parameters for the ShareClient.GetAccessPolicy method.
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
+func (client *ShareClient) GetAccessPolicy(ctx context.Context, options *ShareClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientGetAccessPolicyResponse, error) {
+	req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions)
+	if err != nil {
+		return ShareClientGetAccessPolicyResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientGetAccessPolicyResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return ShareClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.getAccessPolicyHandleResponse(resp)
+}
+
+// getAccessPolicyCreateRequest creates the GetAccessPolicy request.
+func (client *ShareClient) getAccessPolicyCreateRequest(ctx context.Context, options *ShareClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "share")
+	reqQP.Set("comp", "acl")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// getAccessPolicyHandleResponse handles the GetAccessPolicy response.
+func (client *ShareClient) getAccessPolicyHandleResponse(resp *http.Response) (ShareClientGetAccessPolicyResponse, error) {
+	result := ShareClientGetAccessPolicyResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetAccessPolicyResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetAccessPolicyResponse{}, err
+		}
+		result.Date = &date
+	}
+	if err := runtime.UnmarshalAsXML(resp, &result); err != nil { // XML body decodes into the embedded SignedIdentifiers
+		return ShareClientGetAccessPolicyResponse{}, err
+	}
+	return result, nil
+}
+
+// GetPermission - Returns the permission (security descriptor) for a given key
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - filePermissionKey - Key of the permission to be set for the directory/file.
+// - options - ShareClientGetPermissionOptions contains the optional parameters for the ShareClient.GetPermission method.
+func (client *ShareClient) GetPermission(ctx context.Context, filePermissionKey string, options *ShareClientGetPermissionOptions) (ShareClientGetPermissionResponse, error) {
+	req, err := client.getPermissionCreateRequest(ctx, filePermissionKey, options)
+	if err != nil {
+		return ShareClientGetPermissionResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientGetPermissionResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return ShareClientGetPermissionResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.getPermissionHandleResponse(resp)
+}
+
+// getPermissionCreateRequest creates the GetPermission request.
+func (client *ShareClient) getPermissionCreateRequest(ctx context.Context, filePermissionKey string, options *ShareClientGetPermissionOptions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "share")
+	reqQP.Set("comp", "filepermission")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-file-permission-key"] = []string{filePermissionKey}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	req.Raw().Header["Accept"] = []string{"application/json"}
+	return req, nil
+}
+
+// getPermissionHandleResponse handles the GetPermission response.
+func (client *ShareClient) getPermissionHandleResponse(resp *http.Response) (ShareClientGetPermissionResponse, error) {
+	result := ShareClientGetPermissionResponse{}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetPermissionResponse{}, err
+		}
+		result.Date = &date
+	}
+	if err := runtime.UnmarshalAsJSON(resp, &result.SharePermission); err != nil { // this operation returns JSON, unlike most in this client
+		return ShareClientGetPermissionResponse{}, err
+	}
+	return result, nil
+}
+
+// GetProperties - Returns all user-defined metadata and system properties for the specified share or share snapshot. The
+// data returned does not include the share's list of files.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - options - ShareClientGetPropertiesOptions contains the optional parameters for the ShareClient.GetProperties method.
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
+func (client *ShareClient) GetProperties(ctx context.Context, options *ShareClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientGetPropertiesResponse, error) {
+	req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions)
+	if err != nil {
+		return ShareClientGetPropertiesResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientGetPropertiesResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return ShareClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.getPropertiesHandleResponse(resp)
+}
+
+// getPropertiesCreateRequest creates the GetProperties request.
+func (client *ShareClient) getPropertiesCreateRequest(ctx context.Context, options *ShareClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "share")
+	if options != nil && options.Sharesnapshot != nil {
+		reqQP.Set("sharesnapshot", *options.Sharesnapshot)
+	}
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// getPropertiesHandleResponse handles the GetProperties response.
+func (client *ShareClient) getPropertiesHandleResponse(resp *http.Response) (ShareClientGetPropertiesResponse, error) { + result := ShareClientGetPropertiesResponse{} + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-share-quota"); val != "" { + quota32, err := strconv.ParseInt(val, 10, 32) + quota := int32(quota32) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + result.Quota = "a + } + if val := resp.Header.Get("x-ms-share-provisioned-iops"); val != "" { + provisionedIops32, err := strconv.ParseInt(val, 10, 32) + provisionedIops := int32(provisionedIops32) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + result.ProvisionedIops = &provisionedIops + } + if val := resp.Header.Get("x-ms-share-provisioned-ingress-mbps"); val != "" { + provisionedIngressMBps32, err := strconv.ParseInt(val, 10, 32) + provisionedIngressMBps := int32(provisionedIngressMBps32) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + result.ProvisionedIngressMBps = &provisionedIngressMBps + } + if 
val := resp.Header.Get("x-ms-share-provisioned-egress-mbps"); val != "" { + provisionedEgressMBps32, err := strconv.ParseInt(val, 10, 32) + provisionedEgressMBps := int32(provisionedEgressMBps32) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + result.ProvisionedEgressMBps = &provisionedEgressMBps + } + if val := resp.Header.Get("x-ms-share-next-allowed-quota-downgrade-time"); val != "" { + nextAllowedQuotaDowngradeTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + result.NextAllowedQuotaDowngradeTime = &nextAllowedQuotaDowngradeTime + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientGetPropertiesResponse{}, err + } + result.AccessTierChangeTime = &accessTierChangeTime + } + if val := resp.Header.Get("x-ms-access-tier-transition-state"); val != "" { + result.AccessTierTransitionState = &val + } + if val := resp.Header.Get("x-ms-enabled-protocols"); val != "" { + result.EnabledProtocols = &val + } + if val := resp.Header.Get("x-ms-root-squash"); val != "" { + result.RootSquash = (*ShareRootSquash)(&val) + } + return result, nil +} + +// GetStatistics - Retrieves statistics related to the share. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientGetStatisticsOptions contains the optional parameters for the ShareClient.GetStatistics method. 
+// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method.
+func (client *ShareClient) GetStatistics(ctx context.Context, options *ShareClientGetStatisticsOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientGetStatisticsResponse, error) {
+	req, err := client.getStatisticsCreateRequest(ctx, options, leaseAccessConditions)
+	if err != nil {
+		return ShareClientGetStatisticsResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientGetStatisticsResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return ShareClientGetStatisticsResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.getStatisticsHandleResponse(resp)
+}
+
+// getStatisticsCreateRequest creates the GetStatistics request.
+func (client *ShareClient) getStatisticsCreateRequest(ctx context.Context, options *ShareClientGetStatisticsOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("restype", "share")
+	reqQP.Set("comp", "stats")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// getStatisticsHandleResponse handles the GetStatistics response.
+func (client *ShareClient) getStatisticsHandleResponse(resp *http.Response) (ShareClientGetStatisticsResponse, error) {
+	result := ShareClientGetStatisticsResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetStatisticsResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientGetStatisticsResponse{}, err
+		}
+		result.Date = &date
+	}
+	if err := runtime.UnmarshalAsXML(resp, &result.ShareStats); err != nil { // XML body carries the share usage statistics
+		return ShareClientGetStatisticsResponse{}, err
+	}
+	return result, nil
+}
+
+// ReleaseLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and
+// delete share operations.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - leaseID - Specifies the current lease ID on the resource.
+// - options - ShareClientReleaseLeaseOptions contains the optional parameters for the ShareClient.ReleaseLease method.
+func (client *ShareClient) ReleaseLease(ctx context.Context, leaseID string, options *ShareClientReleaseLeaseOptions) (ShareClientReleaseLeaseResponse, error) {
+	req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options)
+	if err != nil {
+		return ShareClientReleaseLeaseResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientReleaseLeaseResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return ShareClientReleaseLeaseResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.releaseLeaseHandleResponse(resp)
+}
+
+// releaseLeaseCreateRequest creates the ReleaseLease request.
+func (client *ShareClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, options *ShareClientReleaseLeaseOptions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "lease")
+	reqQP.Set("restype", "share")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	if options != nil && options.Sharesnapshot != nil {
+		reqQP.Set("sharesnapshot", *options.Sharesnapshot)
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-lease-action"] = []string{"release"}
+	req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// releaseLeaseHandleResponse handles the ReleaseLease response.
+func (client *ShareClient) releaseLeaseHandleResponse(resp *http.Response) (ShareClientReleaseLeaseResponse, error) {
+	result := ShareClientReleaseLeaseResponse{}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientReleaseLeaseResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ShareClientReleaseLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
+	return result, nil
+}
+
+// RenewLease - The Lease Share operation establishes and manages a lock on a share, or the specified snapshot for set and
+// delete share operations.
+// If the operation fails it returns an *azcore.ResponseError type.
+//
+// Generated from API version 2020-10-02
+// - leaseID - Specifies the current lease ID on the resource.
+// - options - ShareClientRenewLeaseOptions contains the optional parameters for the ShareClient.RenewLease method.
+func (client *ShareClient) RenewLease(ctx context.Context, leaseID string, options *ShareClientRenewLeaseOptions) (ShareClientRenewLeaseResponse, error) {
+	req, err := client.renewLeaseCreateRequest(ctx, leaseID, options)
+	if err != nil {
+		return ShareClientRenewLeaseResponse{}, err
+	}
+	resp, err := client.pl.Do(req)
+	if err != nil {
+		return ShareClientRenewLeaseResponse{}, err
+	}
+	if !runtime.HasStatusCode(resp, http.StatusOK) {
+		return ShareClientRenewLeaseResponse{}, runtime.NewResponseError(resp)
+	}
+	return client.renewLeaseHandleResponse(resp)
+}
+
+// renewLeaseCreateRequest creates the RenewLease request.
+func (client *ShareClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, options *ShareClientRenewLeaseOptions) (*policy.Request, error) {
+	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+	if err != nil {
+		return nil, err
+	}
+	reqQP := req.Raw().URL.Query()
+	reqQP.Set("comp", "lease")
+	reqQP.Set("restype", "share")
+	if options != nil && options.Timeout != nil {
+		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+	}
+	if options != nil && options.Sharesnapshot != nil {
+		reqQP.Set("sharesnapshot", *options.Sharesnapshot)
+	}
+	req.Raw().URL.RawQuery = reqQP.Encode()
+	req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
+	req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	if options != nil && options.RequestID != nil {
+		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+	}
+	req.Raw().Header["Accept"] = []string{"application/xml"}
+	return req, nil
+}
+
+// renewLeaseHandleResponse handles the RenewLease response.
+func (client *ShareClient) renewLeaseHandleResponse(resp *http.Response) (ShareClientRenewLeaseResponse, error) { + result := ShareClientRenewLeaseResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientRenewLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Restore - Restores a previously deleted Share. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientRestoreOptions contains the optional parameters for the ShareClient.Restore method. +func (client *ShareClient) Restore(ctx context.Context, options *ShareClientRestoreOptions) (ShareClientRestoreResponse, error) { + req, err := client.restoreCreateRequest(ctx, options) + if err != nil { + return ShareClientRestoreResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientRestoreResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return ShareClientRestoreResponse{}, runtime.NewResponseError(resp) + } + return client.restoreHandleResponse(resp) +} + +// restoreCreateRequest creates the Restore request. 
+func (client *ShareClient) restoreCreateRequest(ctx context.Context, options *ShareClientRestoreOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + if options != nil && options.DeletedShareName != nil { + req.Raw().Header["x-ms-deleted-share-name"] = []string{*options.DeletedShareName} + } + if options != nil && options.DeletedShareVersion != nil { + req.Raw().Header["x-ms-deleted-share-version"] = []string{*options.DeletedShareVersion} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// restoreHandleResponse handles the Restore response. 
+func (client *ShareClient) restoreHandleResponse(resp *http.Response) (ShareClientRestoreResponse, error) { + result := ShareClientRestoreResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientRestoreResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientRestoreResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetAccessPolicy - Sets a stored access policy for use with shared access signatures. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - shareACL - The ACL for the share. +// - options - ShareClientSetAccessPolicyOptions contains the optional parameters for the ShareClient.SetAccessPolicy method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *ShareClient) SetAccessPolicy(ctx context.Context, shareACL []*SignedIdentifier, options *ShareClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientSetAccessPolicyResponse, error) { + req, err := client.setAccessPolicyCreateRequest(ctx, shareACL, options, leaseAccessConditions) + if err != nil { + return ShareClientSetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientSetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.setAccessPolicyHandleResponse(resp) +} + +// setAccessPolicyCreateRequest creates the SetAccessPolicy request. +func (client *ShareClient) setAccessPolicyCreateRequest(ctx context.Context, shareACL []*SignedIdentifier, options *ShareClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "acl") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + type wrapper struct { + XMLName xml.Name `xml:"SignedIdentifiers"` + ShareACL *[]*SignedIdentifier `xml:"SignedIdentifier"` + } + return req, runtime.MarshalAsXML(req, wrapper{ShareACL: &shareACL}) +} + +// setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
+func (client *ShareClient) setAccessPolicyHandleResponse(resp *http.Response) (ShareClientSetAccessPolicyResponse, error) { + result := ShareClientSetAccessPolicyResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetMetadata - Sets one or more user-defined name-value pairs for the specified share. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientSetMetadataOptions contains the optional parameters for the ShareClient.SetMetadata method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. 
+func (client *ShareClient) SetMetadata(ctx context.Context, options *ShareClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *ShareClient) setMetadataCreateRequest(ctx context.Context, options *ShareClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "metadata") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if options != nil && options.Metadata != nil { + for k, v := range options.Metadata { + if v != nil { + req.Raw().Header["x-ms-meta-"+k] = []string{*v} + } + } + } + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. 
+func (client *ShareClient) setMetadataHandleResponse(resp *http.Response) (ShareClientSetMetadataResponse, error) { + result := ShareClientSetMetadataResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetMetadataResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetProperties - Sets properties for the specified share. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2020-10-02 +// - options - ShareClientSetPropertiesOptions contains the optional parameters for the ShareClient.SetProperties method. +// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ShareClient.GetProperties method. +func (client *ShareClient) SetProperties(ctx context.Context, options *ShareClientSetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ShareClientSetPropertiesResponse, error) { + req, err := client.setPropertiesCreateRequest(ctx, options, leaseAccessConditions) + if err != nil { + return ShareClientSetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return ShareClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ShareClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.setPropertiesHandleResponse(resp) +} + +// setPropertiesCreateRequest creates the SetProperties request. 
+func (client *ShareClient) setPropertiesCreateRequest(ctx context.Context, options *ShareClientSetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "share") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + if options != nil && options.Quota != nil { + req.Raw().Header["x-ms-share-quota"] = []string{strconv.FormatInt(int64(*options.Quota), 10)} + } + if options != nil && options.AccessTier != nil { + req.Raw().Header["x-ms-access-tier"] = []string{string(*options.AccessTier)} + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} + } + if options != nil && options.RootSquash != nil { + req.Raw().Header["x-ms-root-squash"] = []string{string(*options.RootSquash)} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// setPropertiesHandleResponse handles the SetProperties response. 
+func (client *ShareClient) setPropertiesHandleResponse(resp *http.Response) (ShareClientSetPropertiesResponse, error) { + result := ShareClientSetPropertiesResponse{} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ShareClientSetPropertiesResponse{}, err + } + result.Date = &date + } + return result, nil +} diff --git a/sdk/storage/azfile/internal/generated/zz_time_rfc1123.go b/sdk/storage/azfile/internal/generated/zz_time_rfc1123.go new file mode 100644 index 000000000000..4b4d51aa3994 --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_time_rfc1123.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. 
+ +package generated + +import ( + "strings" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` +) + +type timeRFC1123 time.Time + +func (t timeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(rfc1123JSON)) + return b, nil +} + +func (t timeRFC1123) MarshalText() ([]byte, error) { + b := []byte(time.Time(t).Format(time.RFC1123)) + return b, nil +} + +func (t *timeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) + *t = timeRFC1123(p) + return err +} + +func (t *timeRFC1123) UnmarshalText(data []byte) error { + p, err := time.Parse(time.RFC1123, string(data)) + *t = timeRFC1123(p) + return err +} diff --git a/sdk/storage/azfile/internal/generated/zz_time_rfc3339.go b/sdk/storage/azfile/internal/generated/zz_time_rfc3339.go new file mode 100644 index 000000000000..1ce9d621164e --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_time_rfc3339.go @@ -0,0 +1,59 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "regexp" + "strings" + "time" +) + +const ( + utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` + utcLayout = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. 
+var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +type timeRFC3339 time.Time + +func (t timeRFC3339) MarshalJSON() (json []byte, err error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t timeRFC3339) MarshalText() (text []byte, err error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *timeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcLayoutJSON + if tzOffsetRegex.Match(data) { + layout = rfc3339JSON + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + layout := utcLayout + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = timeRFC3339(p) + return err +} diff --git a/sdk/storage/azfile/internal/generated/zz_xml_helper.go b/sdk/storage/azfile/internal/generated/zz_xml_helper.go new file mode 100644 index 000000000000..144ea18e1aba --- /dev/null +++ b/sdk/storage/azfile/internal/generated/zz_xml_helper.go @@ -0,0 +1,41 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. +// DO NOT EDIT. + +package generated + +import ( + "encoding/xml" + "strings" +) + +type additionalProperties map[string]*string + +// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. 
+func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if tokName == "" { + continue + } + if *ap == nil { + *ap = additionalProperties{} + } + s := string(tt) + (*ap)[tokName] = &s + tokName = "" + break + } + } + return nil +} diff --git a/sdk/storage/azfile/internal/shared/batch_transfer.go b/sdk/storage/azfile/internal/shared/batch_transfer.go new file mode 100644 index 000000000000..ec5541bfbb13 --- /dev/null +++ b/sdk/storage/azfile/internal/shared/batch_transfer.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "context" + "errors" +) + +// BatchTransferOptions identifies options used by doBatchTransfer. +type BatchTransferOptions struct { + TransferSize int64 + ChunkSize int64 + Concurrency uint16 + Operation func(ctx context.Context, offset int64, chunkSize int64) error + OperationName string +} + +// DoBatchTransfer helps to execute operations in a batch manner. +// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) +func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { + if o.ChunkSize == 0 { + return errors.New("ChunkSize cannot be 0") + } + + if o.Concurrency == 0 { + o.Concurrency = 5 // default concurrency + } + + // Prepare and do parallel operations. 
+ numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) + operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently + operationResponseChannel := make(chan error, numChunks) // Holds each response + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create the goroutines that process each operation (in parallel). + for g := uint16(0); g < o.Concurrency; g++ { + //grIndex := g + go func() { + for f := range operationChannel { + err := f() + operationResponseChannel <- err + } + }() + } + + // Add each chunk's operation to the channel. + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + curChunkSize := o.ChunkSize + + if chunkNum == numChunks-1 { // Last chunk + curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total + } + offset := int64(chunkNum) * o.ChunkSize + operationChannel <- func() error { + return o.Operation(ctx, offset, curChunkSize) + } + } + close(operationChannel) + + // Wait for the operations to complete. + var firstErr error = nil + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + responseError := <-operationResponseChannel + // record the first error (the original error which should cause the other chunks to fail with canceled context) + if responseError != nil && firstErr == nil { + cancel() // As soon as any operation fails, cancel all remaining operation calls + firstErr = responseError + } + } + return firstErr +} diff --git a/sdk/storage/azfile/internal/shared/bytes_writer.go b/sdk/storage/azfile/internal/shared/bytes_writer.go new file mode 100644 index 000000000000..8d4d35bdeffd --- /dev/null +++ b/sdk/storage/azfile/internal/shared/bytes_writer.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "errors" +) + +type bytesWriter []byte + +func NewBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/sdk/storage/azfile/internal/shared/bytes_writer_test.go b/sdk/storage/azfile/internal/shared/bytes_writer_test.go new file mode 100644 index 000000000000..5f1bc53c29ca --- /dev/null +++ b/sdk/storage/azfile/internal/shared/bytes_writer_test.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBytesWriter(t *testing.T) { + b := make([]byte, 10) + buffer := NewBytesWriter(b) + + count, err := buffer.WriteAt([]byte{1, 2}, 10) + require.Contains(t, err.Error(), "offset value is out of range") + require.Equal(t, count, 0) + + count, err = buffer.WriteAt([]byte{1, 2}, -1) + require.Contains(t, err.Error(), "offset value is out of range") + require.Equal(t, count, 0) + + count, err = buffer.WriteAt([]byte{1, 2}, 9) + require.Contains(t, err.Error(), "not enough space for all bytes") + require.Equal(t, count, 1) + require.Equal(t, bytes.Compare(b, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 1}), 0) + + count, err = buffer.WriteAt([]byte{1, 2}, 8) + require.NoError(t, err) + require.Equal(t, count, 2) + require.Equal(t, bytes.Compare(b, []byte{0, 0, 0, 0, 0, 0, 0, 0, 1, 2}), 0) +} diff --git a/sdk/storage/azfile/internal/shared/section_writer.go b/sdk/storage/azfile/internal/shared/section_writer.go new file mode 100644 index 000000000000..c8528a2e3ed2 --- /dev/null +++ 
b/sdk/storage/azfile/internal/shared/section_writer.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "errors" + "io" +) + +type SectionWriter struct { + Count int64 + Offset int64 + Position int64 + WriterAt io.WriterAt +} + +func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter { + return &SectionWriter{ + Count: count, + Offset: off, + WriterAt: c, + } +} + +func (c *SectionWriter) Write(p []byte) (int, error) { + remaining := c.Count - c.Position + + if remaining <= 0 { + return 0, errors.New("end of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position) + c.Position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/sdk/storage/azfile/internal/shared/section_writer_test.go b/sdk/storage/azfile/internal/shared/section_writer_test.go new file mode 100644 index 000000000000..a1cf22da410a --- /dev/null +++ b/sdk/storage/azfile/internal/shared/section_writer_test.go @@ -0,0 +1,98 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "bytes" + "io" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSectionWriter(t *testing.T) { + b := [10]byte{} + buffer := NewBytesWriter(b[:]) + + section := NewSectionWriter(buffer, 0, 5) + require.Equal(t, section.Count, int64(5)) + require.Equal(t, section.Offset, int64(0)) + require.Equal(t, section.Position, int64(0)) + + count, err := section.Write([]byte{1, 2, 3}) + require.NoError(t, err) + require.Equal(t, count, 3) + require.Equal(t, section.Position, int64(3)) + require.Equal(t, b, [10]byte{1, 2, 3, 0, 0, 0, 0, 0, 0, 0}) + + count, err = section.Write([]byte{4, 5, 6}) + require.Contains(t, err.Error(), "not enough space for all bytes") + require.Equal(t, count, 2) + require.Equal(t, section.Position, int64(5)) + require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}) + + count, err = section.Write([]byte{6, 7, 8}) + require.Contains(t, err.Error(), "end of section reached") + require.Equal(t, count, 0) + require.Equal(t, section.Position, int64(5)) + require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}) + + // Intentionally create a section writer which will attempt to write + // outside the bounds of the buffer. + section = NewSectionWriter(buffer, 5, 6) + require.Equal(t, section.Count, int64(6)) + require.Equal(t, section.Offset, int64(5)) + require.Equal(t, section.Position, int64(0)) + + count, err = section.Write([]byte{6, 7, 8}) + require.NoError(t, err) + require.Equal(t, count, 3) + require.Equal(t, section.Position, int64(3)) + require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 0, 0}) + + // Attempt to write past the end of the section. Since the underlying + // buffer rejects the write it gives the same error as in the normal case. 
+ count, err = section.Write([]byte{9, 10, 11}) + require.Contains(t, err.Error(), "not enough space for all bytes") + require.Equal(t, count, 2) + require.Equal(t, section.Position, int64(5)) + require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + + // Attempt to write past the end of the buffer. In this case the buffer + // rejects the write completely since it falls completely out of bounds. + count, err = section.Write([]byte{11, 12, 13}) + require.Contains(t, err.Error(), "offset value is out of range") + require.Equal(t, count, 0) + require.Equal(t, section.Position, int64(5)) + require.Equal(t, b, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) +} + +func TestSectionWriterCopySrcDestEmpty(t *testing.T) { + input := make([]byte, 0) + reader := bytes.NewReader(input) + + output := make([]byte, 0) + buffer := NewBytesWriter(output) + section := NewSectionWriter(buffer, 0, 0) + + count, err := io.Copy(section, reader) + require.NoError(t, err) + require.Equal(t, count, int64(0)) +} + +func TestSectionWriterCopyDestEmpty(t *testing.T) { + input := make([]byte, 10) + reader := bytes.NewReader(input) + + output := make([]byte, 0) + buffer := NewBytesWriter(output) + section := NewSectionWriter(buffer, 0, 0) + + count, err := io.Copy(section, reader) + require.Contains(t, err.Error(), "end of section reached") + require.Equal(t, count, int64(0)) +} diff --git a/sdk/storage/azfile/internal/shared/shared.go b/sdk/storage/azfile/internal/shared/shared.go new file mode 100644 index 000000000000..9ef2a3396816 --- /dev/null +++ b/sdk/storage/azfile/internal/shared/shared.go @@ -0,0 +1,209 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "hash/crc64" + "io" + "net" + "strings" +) + +const ( + TokenScope = "https://storage.azure.com/.default" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderXmsDate = "x-ms-date" + HeaderContentLength = "Content-Length" + HeaderContentEncoding = "Content-Encoding" + HeaderContentLanguage = "Content-Language" + HeaderContentType = "Content-Type" + HeaderContentMD5 = "Content-MD5" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderIfMatch = "If-Match" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderRange = "Range" +) + +const StorageAnalyticsVersion = "1.0" + +const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 + +var CRC64Table = crc64.MakeTable(crc64Polynomial) + +const ( + // DefaultFilePermissionString is a constant for all intents and purposes. + // Inherit inherits permissions from the parent folder (default when creating files/folders) + DefaultFilePermissionString = "inherit" + + // DefaultCurrentTimeString sets creation/last write times to now + DefaultCurrentTimeString = "now" + + // DefaultPreserveString preserves old permissions on the file/folder (default when updating properties) + DefaultPreserveString = "preserve" + + // FileAttributesNone is defaults for file attributes when creating file. + // This attribute is valid only when used alone. + FileAttributesNone = "None" + + // FileAttributesDirectory is defaults for file attributes when creating directory. + // The attribute that identifies a directory + FileAttributesDirectory = "Directory" +) + +func GetClientOptions[T any](o *T) *T { + if o == nil { + return new(T) + } + return o +} + +var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + + "should contain key value pairs separated by semicolons. 
For example 'DefaultEndpointsProtocol=https;AccountName=;" + + "AccountKey=;EndpointSuffix=core.windows.net'") + +type ParsedConnectionString struct { + ServiceURL string + AccountName string + AccountKey string +} + +func ParseConnectionString(connectionString string) (ParsedConnectionString, error) { + const ( + defaultScheme = "https" + defaultSuffix = "core.windows.net" + ) + + connStrMap := make(map[string]string) + connectionString = strings.TrimRight(connectionString, ";") + + splitString := strings.Split(connectionString, ";") + if len(splitString) == 0 { + return ParsedConnectionString{}, errConnectionString + } + for _, stringPart := range splitString { + parts := strings.SplitN(stringPart, "=", 2) + if len(parts) != 2 { + return ParsedConnectionString{}, errConnectionString + } + connStrMap[parts[0]] = parts[1] + } + + accountName, ok := connStrMap["AccountName"] + if !ok { + return ParsedConnectionString{}, errors.New("connection string missing AccountName") + } + + accountKey, ok := connStrMap["AccountKey"] + if !ok { + sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] + if !ok { + return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature") + } + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v://%v.file.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), + }, nil + } + + protocol, ok := connStrMap["DefaultEndpointsProtocol"] + if !ok { + protocol = defaultScheme + } + + suffix, ok := connStrMap["EndpointSuffix"] + if !ok { + suffix = defaultSuffix + } + + if fileEndpoint, ok := connStrMap["FileEndpoint"]; ok { + return ParsedConnectionString{ + ServiceURL: fileEndpoint, + AccountName: accountName, + AccountKey: accountKey, + }, nil + } + + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v://%v.file.%v", protocol, accountName, suffix), + AccountName: accountName, + AccountKey: accountKey, + }, nil +} + +// IsIPEndpointStyle checks if URL's 
host is IP, in this case the storage account endpoint will be composed as: +// http(s)://IP(:port)/storageaccount/share(||container||etc)/... +// As url's Host property, host could be both host or host:port +func IsIPEndpointStyle(host string) bool { + if host == "" { + return false + } + if h, _, err := net.SplitHostPort(host); err == nil { + host = h + } + // For IPv6, there could be case where SplitHostPort fails for cannot finding port. + // In this case, eliminate the '[' and ']' in the URL. + // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + return net.ParseIP(host) != nil +} + +func GenerateLeaseID(leaseID *string) (*string, error) { + if leaseID == nil { + generatedUuid, err := uuid.New() + if err != nil { + return nil, err + } + leaseID = to.Ptr(generatedUuid.String()) + } + return leaseID, nil +} + +func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) { + if body == nil { // nil body is "logically" seekable to 0 and are 0 bytes long + return 0, nil + } + + err := validateSeekableStreamAt0(body) + if err != nil { + return 0, err + } + + count, err := body.Seek(0, io.SeekEnd) + if err != nil { + return 0, errors.New("body stream must be seekable") + } + + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return 0, err + } + return count, nil +} + +// return an error if body is not a valid seekable stream at 0 +func validateSeekableStreamAt0(body io.ReadSeeker) error { + if body == nil { // nil body is "logically" seekable to 0 + return nil + } + if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil { + // Help detect programmer error + if err != nil { + return errors.New("body stream must be seekable") + } + return errors.New("body stream must be set to position 0") + } + return nil +} diff --git a/sdk/storage/azfile/internal/shared/shared_test.go 
b/sdk/storage/azfile/internal/shared/shared_test.go new file mode 100644 index 000000000000..1cd5da99469d --- /dev/null +++ b/sdk/storage/azfile/internal/shared/shared_test.go @@ -0,0 +1,95 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseConnectionStringInvalid(t *testing.T) { + badConnectionStrings := []string{ + "", + "foobar", + "foo;bar;baz", + "foo=;bar=;", + "=", + ";", + "=;==", + "foobar=baz=foo", + } + + for _, badConnStr := range badConnectionStrings { + parsed, err := ParseConnectionString(badConnStr) + require.Error(t, err) + require.Zero(t, parsed) + } +} + +func TestParseConnectionString(t *testing.T) { + connStr := "DefaultEndpointsProtocol=https;AccountName=dummyaccount;AccountKey=secretkeykey;EndpointSuffix=core.windows.net" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "https://dummyaccount.file.core.windows.net", parsed.ServiceURL) + require.Equal(t, "dummyaccount", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestParseConnectionStringHTTP(t *testing.T) { + connStr := "DefaultEndpointsProtocol=http;AccountName=dummyaccount;AccountKey=secretkeykey;EndpointSuffix=core.windows.net" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "http://dummyaccount.file.core.windows.net", parsed.ServiceURL) + require.Equal(t, "dummyaccount", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestParseConnectionStringBasic(t *testing.T) { + connStr := "AccountName=dummyaccount;AccountKey=secretkeykey" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "https://dummyaccount.file.core.windows.net", 
parsed.ServiceURL) + require.Equal(t, "dummyaccount", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestParseConnectionStringCustomDomain(t *testing.T) { + connStr := "AccountName=dummyaccount;AccountKey=secretkeykey;FileEndpoint=www.mydomain.com;" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "www.mydomain.com", parsed.ServiceURL) + require.Equal(t, "dummyaccount", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestParseConnectionStringSAS(t *testing.T) { + connStr := "AccountName=dummyaccount;SharedAccessSignature=fakesharedaccesssignature;" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "https://dummyaccount.file.core.windows.net/?fakesharedaccesssignature", parsed.ServiceURL) + require.Empty(t, parsed.AccountName) + require.Empty(t, parsed.AccountKey) +} + +func TestParseConnectionStringChinaCloud(t *testing.T) { + connStr := "AccountName=dummyaccountname;AccountKey=secretkeykey;DefaultEndpointsProtocol=http;EndpointSuffix=core.chinacloudapi.cn;" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "http://dummyaccountname.file.core.chinacloudapi.cn", parsed.ServiceURL) + require.Equal(t, "dummyaccountname", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} + +func TestCParseConnectionStringAzurite(t *testing.T) { + connStr := "DefaultEndpointsProtocol=http;AccountName=dummyaccountname;AccountKey=secretkeykey;FileEndpoint=http://local-machine:11002/custom/account/path/faketokensignature;" + parsed, err := ParseConnectionString(connStr) + require.NoError(t, err) + require.Equal(t, "http://local-machine:11002/custom/account/path/faketokensignature", parsed.ServiceURL) + require.Equal(t, "dummyaccountname", parsed.AccountName) + require.Equal(t, "secretkeykey", parsed.AccountKey) +} diff --git 
a/sdk/storage/azfile/internal/testcommon/clients_auth.go b/sdk/storage/azfile/internal/testcommon/clients_auth.go new file mode 100644 index 000000000000..8e2e562116f0 --- /dev/null +++ b/sdk/storage/azfile/internal/testcommon/clients_auth.go @@ -0,0 +1,224 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +// Contains common helpers for TESTS ONLY +package testcommon + +import ( + "context" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "strings" + "testing" + "time" +) + +type TestAccountType string + +const ( + TestAccountDefault TestAccountType = "" + TestAccountSecondary TestAccountType = "SECONDARY_" + TestAccountPremium TestAccountType = "PREMIUM_" + TestAccountSoftDelete TestAccountType = "SOFT_DELETE_" +) + +const ( + DefaultEndpointSuffix = "core.windows.net/" + DefaultFileEndpointSuffix = "file.core.windows.net/" + AccountNameEnvVar = "AZURE_STORAGE_ACCOUNT_NAME" + AccountKeyEnvVar = "AZURE_STORAGE_ACCOUNT_KEY" + DefaultEndpointSuffixEnvVar = "AZURE_STORAGE_ENDPOINT_SUFFIX" +) + +const ( + FakeStorageAccount = "fakestorage" + FakeStorageURL = "https://fakestorage.file.core.windows.net" + FakeToken = "faketoken" +) + +const ( + 
ISO8601 = "2006-01-02T15:04:05.0000000Z07:00" +) + +var ( + SampleSDDL = `O:S-1-5-32-548G:S-1-5-21-397955417-626881126-188441444-512D:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)` +) + +var BasicMetadata = map[string]*string{ + "foo": to.Ptr("foovalue"), + "bar": to.Ptr("barvalue"), +} + +func SetClientOptions(t *testing.T, opts *azcore.ClientOptions) { + opts.Logging.AllowedHeaders = append(opts.Logging.AllowedHeaders, "X-Request-Mismatch", "X-Request-Mismatch-Error") + + transport, err := recording.NewRecordingHTTPClient(t, nil) + require.NoError(t, err) + opts.Transport = transport +} + +func GetServiceClient(t *testing.T, accountType TestAccountType, options *service.ClientOptions) (*service.Client, error) { + if options == nil { + options = &service.ClientOptions{} + } + + SetClientOptions(t, &options.ClientOptions) + + cred, err := GetGenericSharedKeyCredential(accountType) + if err != nil { + return nil, err + } + + serviceClient, err := service.NewClientWithSharedKeyCredential("https://"+cred.AccountName()+".file.core.windows.net/", cred, options) + + return serviceClient, err +} + +func GetServiceClientNoCredential(t *testing.T, sasUrl string, options *service.ClientOptions) (*service.Client, error) { + if options == nil { + options = &service.ClientOptions{} + } + + SetClientOptions(t, &options.ClientOptions) + + serviceClient, err := service.NewClientWithNoCredential(sasUrl, options) + + return serviceClient, err +} + +func GetGenericAccountInfo(accountType TestAccountType) (string, string) { + if recording.GetRecordMode() == recording.PlaybackMode { + return FakeStorageAccount, "ZmFrZQ==" + } + accountNameEnvVar := string(accountType) + AccountNameEnvVar + accountKeyEnvVar := string(accountType) + AccountKeyEnvVar + accountName, _ := GetRequiredEnv(accountNameEnvVar) + accountKey, _ := GetRequiredEnv(accountKeyEnvVar) + return accountName, accountKey +} + +func GetGenericSharedKeyCredential(accountType TestAccountType) (*service.SharedKeyCredential, error) { + 
accountName, accountKey := GetGenericAccountInfo(accountType) + if accountName == "" || accountKey == "" { + return nil, errors.New(string(accountType) + AccountNameEnvVar + " and/or " + string(accountType) + AccountKeyEnvVar + " environment variables not specified.") + } + return service.NewSharedKeyCredential(accountName, accountKey) +} + +func GetGenericConnectionString(accountType TestAccountType) (*string, error) { + accountName, accountKey := GetGenericAccountInfo(accountType) + if accountName == "" || accountKey == "" { + return nil, errors.New(string(accountType) + AccountNameEnvVar + " and/or " + string(accountType) + AccountKeyEnvVar + " environment variables not specified.") + } + connectionString := fmt.Sprintf("DefaultEndpointsProtocol=https;AccountName=%s;AccountKey=%s;EndpointSuffix=core.windows.net/", + accountName, accountKey) + return &connectionString, nil +} + +type FakeCredential struct { +} + +func (c *FakeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + return azcore.AccessToken{Token: FakeToken, ExpiresOn: time.Now().Add(time.Hour).UTC()}, nil +} + +func GetGenericTokenCredential() (azcore.TokenCredential, error) { + if recording.GetRecordMode() == recording.PlaybackMode { + return &FakeCredential{}, nil + } + return azidentity.NewDefaultAzureCredential(nil) +} + +func GetServiceClientFromConnectionString(t *testing.T, accountType TestAccountType, options *service.ClientOptions) (*service.Client, error) { + if options == nil { + options = &service.ClientOptions{} + } + SetClientOptions(t, &options.ClientOptions) + + transport, err := recording.NewRecordingHTTPClient(t, nil) + require.NoError(t, err) + options.Transport = transport + + cred, err := GetGenericConnectionString(accountType) + if err != nil { + return nil, err + } + svcClient, err := service.NewClientFromConnectionString(*cred, options) + return svcClient, err +} + +func GetShareClient(shareName string, s *service.Client) 
*share.Client { + return s.NewShareClient(shareName) +} + +func CreateNewShare(ctx context.Context, _require *require.Assertions, shareName string, svcClient *service.Client) *share.Client { + shareClient := GetShareClient(shareName, svcClient) + _, err := shareClient.Create(ctx, nil) + _require.NoError(err) + return shareClient +} + +func DeleteShare(ctx context.Context, _require *require.Assertions, shareClient *share.Client) { + _, err := shareClient.Delete(ctx, nil) + _require.NoError(err) +} + +func GetDirectoryClient(dirName string, s *share.Client) *directory.Client { + return s.NewDirectoryClient(dirName) +} + +func CreateNewDirectory(ctx context.Context, _require *require.Assertions, dirName string, shareClient *share.Client) *directory.Client { + dirClient := GetDirectoryClient(dirName, shareClient) + _, err := dirClient.Create(ctx, nil) + _require.NoError(err) + return dirClient +} + +func DeleteDirectory(ctx context.Context, _require *require.Assertions, dirClient *directory.Client) { + _, err := dirClient.Delete(ctx, nil) + _require.NoError(err) +} + +func GetFileClientFromShare(fileName string, shareClient *share.Client) *file.Client { + return shareClient.NewRootDirectoryClient().NewFileClient(fileName) +} + +func CreateNewFileFromShare(ctx context.Context, _require *require.Assertions, fileName string, fileSize int64, shareClient *share.Client) *file.Client { + fClient := GetFileClientFromShare(fileName, shareClient) + + _, err := fClient.Create(ctx, fileSize, nil) + _require.NoError(err) + + return fClient +} + +func CreateNewFileFromShareWithData(ctx context.Context, _require *require.Assertions, fileName string, shareClient *share.Client) *file.Client { + fClient := GetFileClientFromShare(fileName, shareClient) + + _, err := fClient.Create(ctx, int64(len(FileDefaultData)), nil) + _require.NoError(err) + + _, err = fClient.UploadRange(ctx, 0, streaming.NopCloser(strings.NewReader(FileDefaultData)), nil) + _require.NoError(err) + + return fClient 
+} + +func DeleteFile(ctx context.Context, _require *require.Assertions, fileClient *file.Client) { + _, err := fileClient.Delete(ctx, nil) + _require.NoError(err) +} diff --git a/sdk/storage/azfile/internal/testcommon/common.go b/sdk/storage/azfile/internal/testcommon/common.go new file mode 100644 index 000000000000..11e61800da79 --- /dev/null +++ b/sdk/storage/azfile/internal/testcommon/common.go @@ -0,0 +1,117 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +// Contains common helpers for TESTS ONLY +package testcommon + +import ( + "bytes" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/stretchr/testify/require" + "io" + "os" + "strconv" + "strings" + "testing" +) + +const ( + SharePrefix = "gos" + DirectoryPrefix = "godir" + FilePrefix = "gotestfile" + FileDefaultData = "GoFileDefaultData" +) + +func GenerateShareName(testName string) string { + return SharePrefix + GenerateEntityName(testName) +} + +func GenerateEntityName(testName string) string { + return strings.ReplaceAll(strings.ReplaceAll(strings.ToLower(testName), "/", ""), "test", "") +} + +func GenerateDirectoryName(testName string) string { + return DirectoryPrefix + GenerateEntityName(testName) +} + +func GenerateFileName(testName string) string { + return FilePrefix + GenerateEntityName(testName) +} + +const random64BString string = "2SDgZj6RkKYzJpu04sweQek4uWHO8ndPnYlZ0tnFS61hjnFZ5IkvIGGY44eKABov" + +func GenerateData(sizeInBytes int) (io.ReadSeekCloser, []byte) { + data := make([]byte, sizeInBytes) + _len := len(random64BString) + if sizeInBytes > _len { + count := sizeInBytes / _len + if 
sizeInBytes%_len != 0 { + count = count + 1 + } + copy(data[:], strings.Repeat(random64BString, count)) + } else { + copy(data[:], random64BString) + } + return streaming.NopCloser(bytes.NewReader(data)), data +} + +func ValidateHTTPErrorCode(_require *require.Assertions, err error, code int) { + _require.Error(err) + var responseErr *azcore.ResponseError + errors.As(err, &responseErr) + if responseErr != nil { + _require.Equal(responseErr.StatusCode, code) + } else { + _require.Equal(strings.Contains(err.Error(), strconv.Itoa(code)), true) + } +} + +func ValidateFileErrorCode(_require *require.Assertions, err error, code fileerror.Code) { + _require.Error(err) + var responseErr *azcore.ResponseError + errors.As(err, &responseErr) + if responseErr != nil { + _require.Equal(string(code), responseErr.ErrorCode) + } else { + _require.Contains(err.Error(), code) + } +} + +// GetRequiredEnv gets an environment variable by name and returns an error if it is not found +func GetRequiredEnv(name string) (string, error) { + env, ok := os.LookupEnv(name) + if ok { + return env, nil + } else { + return "", errors.New("Required environment variable not set: " + name) + } +} + +func BeforeTest(t *testing.T, suite string, test string) { + const urlRegex = `https://\S+\.file\.core\.windows\.net` + const tokenRegex = `(?:Bearer\s).*` + + require.NoError(t, recording.AddURISanitizer(FakeStorageURL, urlRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source", FakeStorageURL, urlRegex, nil)) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-copy-source-authorization", FakeToken, tokenRegex, nil)) + // we freeze request IDs and timestamps to avoid creating noisy diffs + // NOTE: we can't freeze time stamps as that breaks some tests that use if-modified-since etc (maybe it can be fixed?) 
+ //testframework.AddHeaderRegexSanitizer("X-Ms-Date", "Wed, 10 Aug 2022 23:34:14 GMT", "", nil) + require.NoError(t, recording.AddHeaderRegexSanitizer("x-ms-request-id", "00000000-0000-0000-0000-000000000000", "", nil)) + //testframework.AddHeaderRegexSanitizer("Date", "Wed, 10 Aug 2022 23:34:14 GMT", "", nil) + // TODO: more freezing + //testframework.AddBodyRegexSanitizer("RequestId:00000000-0000-0000-0000-000000000000", `RequestId:\w{8}-\w{4}-\w{4}-\w{4}-\w{12}`, nil) + //testframework.AddBodyRegexSanitizer("Time:2022-08-11T00:21:56.4562741Z", `Time:\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:\.\d*)?Z`, nil) + require.NoError(t, recording.Start(t, "sdk/storage/azfile/testdata", nil)) +} + +func AfterTest(t *testing.T, suite string, test string) { + require.NoError(t, recording.Stop(t, nil)) +} diff --git a/sdk/storage/azfile/lease/client_test.go b/sdk/storage/azfile/lease/client_test.go new file mode 100644 index 000000000000..7b90fbfa9f7f --- /dev/null +++ b/sdk/storage/azfile/lease/client_test.go @@ -0,0 +1,633 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package lease_test + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running lease Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + suite.Run(t, &LeaseUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + } +} + +func (l *LeaseRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(l.T(), suite, test) +} + +func (l *LeaseRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(l.T(), suite, test) +} + +func (l *LeaseUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (l *LeaseUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type LeaseRecordedTestsSuite struct { + suite.Suite +} + +type LeaseUnrecordedTestsSuite struct { + suite.Suite +} + +var proposedLeaseIDs = []*string{to.Ptr("c820a799-76d7-4ee2-6e15-546f19325c2c"), to.Ptr("326cc5e1-746e-4af8-4811-a50e6629a8ca")} + +func (l *LeaseRecordedTestsSuite) TestShareAcquireLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := 
testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + _, err = shareClient.Delete(ctx, nil) + _require.Error(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestNegativeShareAcquireMultipleLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient0, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + shareLeaseClient1, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[1], + }) + + ctx := context.Background() + acquireLeaseResponse0, err := shareLeaseClient0.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse0.LeaseID) + _require.EqualValues(*acquireLeaseResponse0.LeaseID, *shareLeaseClient0.LeaseID()) + + // acquiring lease for the second time returns LeaseAlreadyPresent error + _, err = shareLeaseClient1.Acquire(ctx, int32(60), nil) + _require.Error(err) + + _, err = shareLeaseClient0.Release(ctx, nil) + _require.NoError(err) +} + +func (l 
*LeaseRecordedTestsSuite) TestShareDeleteShareWithoutLeaseId() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + _, err = shareClient.Delete(ctx, nil) + _require.Error(err) + + leaseID := shareLeaseClient.LeaseID() + _, err = shareClient.Delete(ctx, &share.DeleteOptions{ + LeaseAccessConditions: &share.LeaseAccessConditions{LeaseID: leaseID}, + }) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareReleaseLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + _, err = 
shareClient.Delete(ctx, nil) + _require.Error(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) + + _, err = shareClient.Delete(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareRenewLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(15), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + _, err = shareLeaseClient.Renew(ctx, nil) + _require.NoError(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareBreakLeaseDefault() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + 
_require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + bResp, err := shareLeaseClient.Break(ctx, nil) + _require.NoError(err) + _require.NotNil(bResp.LeaseTime) + + _, err = shareClient.Delete(ctx, nil) + _require.Error(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareBreakLeaseNonDefault() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + bResp, err := shareLeaseClient.Break(ctx, &lease.ShareBreakOptions{ + BreakPeriod: to.Ptr((int32)(5)), + }) + _require.NoError(err) + _require.NotNil(bResp.LeaseTime) + + _, err = shareClient.Delete(ctx, nil) + _require.Error(err) + + // wait for lease to expire + time.Sleep(6 * time.Second) + + _, err = shareClient.Delete(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestNegativeShareBreakRenewLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer 
testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + bResp, err := shareLeaseClient.Break(ctx, &lease.ShareBreakOptions{ + BreakPeriod: to.Ptr((int32)(5)), + }) + _require.NoError(err) + _require.NotNil(bResp.LeaseTime) + + // renewing broken lease returns error + _, err = shareLeaseClient.Renew(ctx, nil) + _require.Error(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestShareChangeLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + shareLeaseClient, _ := lease.NewShareClient(shareClient, &lease.ShareClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := shareLeaseClient.Acquire(ctx, int32(60), nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *shareLeaseClient.LeaseID()) + + oldLeaseID := shareLeaseClient.LeaseID() + + changeLeaseResp, err := shareLeaseClient.Change(ctx, *proposedLeaseIDs[1], nil) + _require.NoError(err) + _require.EqualValues(changeLeaseResp.LeaseID, proposedLeaseIDs[1]) + _require.EqualValues(shareLeaseClient.LeaseID(), proposedLeaseIDs[1]) + + _, err = shareClient.Delete(ctx, 
&share.DeleteOptions{ + LeaseAccessConditions: &share.LeaseAccessConditions{ + LeaseID: oldLeaseID, + }, + }) + _require.Error(err) + + _, err = shareLeaseClient.Renew(ctx, nil) + _require.NoError(err) + + _, err = shareLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestFileAcquireLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestNegativeFileAcquireMultipleLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + 
fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient0, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + fileLeaseClient1, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[1], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient0.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient0.LeaseID()) + + // acquiring lease for the second time returns LeaseAlreadyPresent error + _, err = fileLeaseClient1.Acquire(ctx, nil) + _require.Error(err) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient0.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestDeleteFileWithoutLeaseId() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + 
_require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + leaseID := fileLeaseClient.LeaseID() + _, err = fileClient.Delete(ctx, &file.DeleteOptions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: leaseID, + }, + }) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestFileReleaseLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient.Release(ctx, nil) + _require.NoError(err) + + _, err = fileClient.Delete(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestFileChangeLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, 
svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.Equal(*acquireLeaseResponse.LeaseID, *proposedLeaseIDs[0]) + + oldLeaseID := fileLeaseClient.LeaseID() + + changeLeaseResp, err := fileLeaseClient.Change(ctx, *proposedLeaseIDs[1], nil) + _require.NoError(err) + _require.Equal(*changeLeaseResp.LeaseID, *proposedLeaseIDs[1]) + + _, err = fileClient.Delete(ctx, &file.DeleteOptions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: oldLeaseID, + }, + }) + _require.Error(err) + + _, err = fileLeaseClient.Release(ctx, nil) + _require.NoError(err) +} + +func (l *LeaseRecordedTestsSuite) TestNegativeFileDeleteAfterReleaseLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + 
acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient.Release(ctx, nil) + _require.NoError(err) + + // deleting file after its lease has expired or released returns error. + _, err = fileClient.Delete(ctx, &file.DeleteOptions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: fileLeaseClient.LeaseID(), + }, + }) + _require.Error(err) +} + +func (l *LeaseRecordedTestsSuite) TestFileBreakLease() { + _require := require.New(l.T()) + testName := l.T().Name() + + svcClient, err := testcommon.GetServiceClient(l.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + ctx := context.Background() + fileName := testcommon.GenerateFileName(testName) + fileClient := shareClient.NewRootDirectoryClient().NewFileClient(fileName) + _, err = fileClient.Create(ctx, 0, nil) + _require.NoError(err) + + fileLeaseClient, err := lease.NewFileClient(fileClient, &lease.FileClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + acquireLeaseResponse, err := fileLeaseClient.Acquire(ctx, nil) + _require.NoError(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.Error(err) + + _, err = fileLeaseClient.Break(ctx, nil) + _require.NoError(err) + + _, err = fileClient.Delete(ctx, nil) + _require.NoError(err) +} diff --git a/sdk/storage/azfile/lease/constants.go b/sdk/storage/azfile/lease/constants.go new file mode 100644 index 
000000000000..3b384475deb0 --- /dev/null +++ b/sdk/storage/azfile/lease/constants.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// DurationType - When a share is leased, specifies whether the lease is of infinite or fixed duration. +type DurationType = generated.LeaseDurationType + +const ( + DurationTypeInfinite DurationType = generated.LeaseDurationTypeInfinite + DurationTypeFixed DurationType = generated.LeaseDurationTypeFixed +) + +// PossibleDurationTypeValues returns the possible values for the DurationType const type. +func PossibleDurationTypeValues() []DurationType { + return generated.PossibleLeaseDurationTypeValues() +} + +// StateType - Lease state of the share. +type StateType = generated.LeaseStateType + +const ( + StateTypeAvailable StateType = generated.LeaseStateTypeAvailable + StateTypeLeased StateType = generated.LeaseStateTypeLeased + StateTypeExpired StateType = generated.LeaseStateTypeExpired + StateTypeBreaking StateType = generated.LeaseStateTypeBreaking + StateTypeBroken StateType = generated.LeaseStateTypeBroken +) + +// PossibleStateTypeValues returns the possible values for the StateType const type. +func PossibleStateTypeValues() []StateType { + return generated.PossibleLeaseStateTypeValues() +} + +// StatusType - The current lease status of the share. +type StatusType = generated.LeaseStatusType + +const ( + StatusTypeLocked StatusType = generated.LeaseStatusTypeLocked + StatusTypeUnlocked StatusType = generated.LeaseStatusTypeUnlocked +) + +// PossibleStatusTypeValues returns the possible values for the StatusType const type. 
+func PossibleStatusTypeValues() []StatusType { + return generated.PossibleLeaseStatusTypeValues() +} diff --git a/sdk/storage/azfile/lease/examples_test.go b/sdk/storage/azfile/lease/examples_test.go new file mode 100644 index 000000000000..f8b61b3ba133 --- /dev/null +++ b/sdk/storage/azfile/lease/examples_test.go @@ -0,0 +1,101 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/lease" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "log" + "os" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +// This example shows how to perform various lease operations on a share. +// The same lease operations can be performed on individual files as well. +// A lease on a share prevents it from being deleted by others, while a lease on a file +// protects it from both modifications and deletions. +func Example_lease_ShareClient_AcquireLease() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + // Create a unique ID for the lease + // A lease ID can be any valid GUID string format. 
To generate UUIDs, consider the github.com/google/uuid package + leaseID := "36b1a876-cf98-4eb2-a5c3-6d68489658ff" + shareLeaseClient, err := lease.NewShareClient(shareClient, &lease.ShareClientOptions{LeaseID: to.Ptr(leaseID)}) + handleError(err) + + // Now acquire a lease on the share. + // You can choose to pass an empty string for proposed ID so that the service automatically assigns one for you. + duration := int32(60) + acquireLeaseResponse, err := shareLeaseClient.Acquire(context.TODO(), duration, nil) + handleError(err) + fmt.Println("The share is leased for delete operations with lease ID", *acquireLeaseResponse.LeaseID) + + // The share cannot be deleted without providing the lease ID. + _, err = shareClient.Delete(context.TODO(), nil) + if err == nil { + log.Fatal("delete should have failed") + } + + fmt.Println("The share cannot be deleted while there is an active lease") + + // share can be deleted by providing the lease id + //_, err = shareClient.Delete(context.TODO(), &share.DeleteOptions{ + // LeaseAccessConditions: &share.LeaseAccessConditions{LeaseID: acquireLeaseResponse.LeaseID}, + //}) + + // We can release the lease now and the share can be deleted. + _, err = shareLeaseClient.Release(context.TODO(), nil) + handleError(err) + fmt.Println("The lease on the share is now released") + + // AcquireLease a lease again to perform other operations. + // Duration is still 60 + acquireLeaseResponse, err = shareLeaseClient.Acquire(context.TODO(), duration, nil) + handleError(err) + fmt.Println("The share is leased again with lease ID", *acquireLeaseResponse.LeaseID) + + // We can change the ID of an existing lease. + newLeaseID := "6b3e65e5-e1bb-4a3f-8b72-13e9bc9cd3bf" + changeLeaseResponse, err := shareLeaseClient.Change(context.TODO(), newLeaseID, nil) + handleError(err) + fmt.Println("The lease ID was changed to", *changeLeaseResponse.LeaseID) + + // The lease can be renewed. 
+ renewLeaseResponse, err := shareLeaseClient.Renew(context.TODO(), nil) + handleError(err) + fmt.Println("The lease was renewed with the same ID", *renewLeaseResponse.LeaseID) + + // Finally, the lease can be broken, and we could prevent others from acquiring a lease for a period of time + _, err = shareLeaseClient.Break(context.TODO(), &lease.ShareBreakOptions{BreakPeriod: to.Ptr(int32(60))}) + handleError(err) + fmt.Println("The lease was broken, and nobody can acquire a lease for 60 seconds") +} diff --git a/sdk/storage/azfile/lease/file_client.go b/sdk/storage/azfile/lease/file_client.go new file mode 100644 index 000000000000..b1bffc781a5b --- /dev/null +++ b/sdk/storage/azfile/lease/file_client.go @@ -0,0 +1,103 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" +) + +// FileClient provides lease functionality for the underlying file client. +type FileClient struct { + fileClient *file.Client + leaseID *string +} + +// FileClientOptions contains the optional values when creating a FileClient. +type FileClientOptions struct { + // LeaseID contains a caller-provided lease ID. + LeaseID *string +} + +// NewFileClient creates a file lease client for the provided file client. 
+// - client - an instance of a file client +// - options - client options; pass nil to accept the default values +func NewFileClient(client *file.Client, options *FileClientOptions) (*FileClient, error) { + var leaseID *string + if options != nil { + leaseID = options.LeaseID + } + + leaseID, err := shared.GenerateLeaseID(leaseID) + if err != nil { + return nil, err + } + + return &FileClient{ + fileClient: client, + leaseID: leaseID, + }, nil +} + +func (f *FileClient) generated() *generated.FileClient { + return base.InnerClient((*base.Client[generated.FileClient])(f.fileClient)) +} + +// LeaseID returns leaseID of the client. +func (f *FileClient) LeaseID() *string { + return f.leaseID +} + +// Acquire operation can be used to request a new lease. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-file. +func (f *FileClient) Acquire(ctx context.Context, options *FileAcquireOptions) (FileAcquireResponse, error) { + opts := options.format(f.LeaseID()) + resp, err := f.generated().AcquireLease(ctx, (int32)(-1), opts) + return resp, err +} + +// Break operation can be used to break the lease, if the file has an active lease. Once a lease is broken, it cannot be renewed. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-file. +func (f *FileClient) Break(ctx context.Context, options *FileBreakOptions) (FileBreakResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := f.generated().BreakLease(ctx, opts, leaseAccessConditions) + return resp, err +} + +// Change operation can be used to change the lease ID of an active lease. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-file. 
+func (f *FileClient) Change(ctx context.Context, proposedLeaseID string, options *FileChangeOptions) (FileChangeResponse, error) { + if f.LeaseID() == nil { + return FileChangeResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format(&proposedLeaseID) + resp, err := f.generated().ChangeLease(ctx, *f.LeaseID(), opts) + + // If lease has been changed successfully, set the leaseID in client + if err == nil { + f.leaseID = &proposedLeaseID + } + + return resp, err +} + +// Release operation can be used to free the lease if it is no longer needed so that another client may immediately acquire a lease against the file. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-file. +func (f *FileClient) Release(ctx context.Context, options *FileReleaseOptions) (FileReleaseResponse, error) { + if f.LeaseID() == nil { + return FileReleaseResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format() + resp, err := f.generated().ReleaseLease(ctx, *f.LeaseID(), opts) + return resp, err +} diff --git a/sdk/storage/azfile/lease/models.go b/sdk/storage/azfile/lease/models.go new file mode 100644 index 000000000000..0de250f8aeb4 --- /dev/null +++ b/sdk/storage/azfile/lease/models.go @@ -0,0 +1,147 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// AccessConditions contains optional parameters to access leased entity. +type AccessConditions = generated.LeaseAccessConditions + +// FileAcquireOptions contains the optional parameters for the FileClient.Acquire method. 
+type FileAcquireOptions struct { + // placeholder for future options +} + +func (o *FileAcquireOptions) format(proposedLeaseID *string) *generated.FileClientAcquireLeaseOptions { + return &generated.FileClientAcquireLeaseOptions{ + ProposedLeaseID: proposedLeaseID, + } +} + +// FileBreakOptions contains the optional parameters for the FileClient.Break method. +type FileBreakOptions struct { + // AccessConditions contains optional parameters to access leased entity. + AccessConditions *AccessConditions +} + +func (o *FileBreakOptions) format() (*generated.FileClientBreakLeaseOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.AccessConditions +} + +// FileChangeOptions contains the optional parameters for the FileClient.Change method. +type FileChangeOptions struct { + // placeholder for future options +} + +func (o *FileChangeOptions) format(proposedLeaseID *string) *generated.FileClientChangeLeaseOptions { + return &generated.FileClientChangeLeaseOptions{ + ProposedLeaseID: proposedLeaseID, + } +} + +// FileReleaseOptions contains the optional parameters for the FileClient.Release method. +type FileReleaseOptions struct { + // placeholder for future options +} + +func (o *FileReleaseOptions) format() *generated.FileClientReleaseLeaseOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ShareAcquireOptions contains the optional parameters for the ShareClient.Acquire method. +type ShareAcquireOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ ShareSnapshot *string +} + +func (o *ShareAcquireOptions) format(proposedLeaseID *string) *generated.ShareClientAcquireLeaseOptions { + opts := &generated.ShareClientAcquireLeaseOptions{ + ProposedLeaseID: proposedLeaseID, + } + if o != nil { + opts.Sharesnapshot = o.ShareSnapshot + } + return opts +} + +// ShareBreakOptions contains the optional parameters for the ShareClient.Break method. +type ShareBreakOptions struct { + // For a break operation, this is the proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string + // AccessConditions contains optional parameters to access leased entity. + AccessConditions *AccessConditions +} + +func (o *ShareBreakOptions) format() (*generated.ShareClientBreakLeaseOptions, *generated.LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientBreakLeaseOptions{ + BreakPeriod: o.BreakPeriod, + Sharesnapshot: o.ShareSnapshot, + }, o.AccessConditions +} + +// ShareChangeOptions contains the optional parameters for the ShareClient.Change method. +type ShareChangeOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. 
+ ShareSnapshot *string +} + +func (o *ShareChangeOptions) format(proposedLeaseID *string) *generated.ShareClientChangeLeaseOptions { + opts := &generated.ShareClientChangeLeaseOptions{ + ProposedLeaseID: proposedLeaseID, + } + if o != nil { + opts.Sharesnapshot = o.ShareSnapshot + } + return opts +} + +// ShareReleaseOptions contains the optional parameters for the ShareClient.Release method. +type ShareReleaseOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ShareReleaseOptions) format() *generated.ShareClientReleaseLeaseOptions { + if o == nil { + return nil + } + return &generated.ShareClientReleaseLeaseOptions{ + Sharesnapshot: o.ShareSnapshot, + } +} + +// ShareRenewOptions contains the optional parameters for the ShareClient.Renew method. +type ShareRenewOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string +} + +func (o *ShareRenewOptions) format() *generated.ShareClientRenewLeaseOptions { + if o == nil { + return nil + } + return &generated.ShareClientRenewLeaseOptions{ + Sharesnapshot: o.ShareSnapshot, + } +} diff --git a/sdk/storage/azfile/lease/responses.go b/sdk/storage/azfile/lease/responses.go new file mode 100644 index 000000000000..23a5a1db3063 --- /dev/null +++ b/sdk/storage/azfile/lease/responses.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// FileAcquireResponse contains the response from method FileClient.Acquire. 
+type FileAcquireResponse = generated.FileClientAcquireLeaseResponse + +// FileBreakResponse contains the response from method FileClient.Break. +type FileBreakResponse = generated.FileClientBreakLeaseResponse + +// FileChangeResponse contains the response from method FileClient.Change. +type FileChangeResponse = generated.FileClientChangeLeaseResponse + +// FileReleaseResponse contains the response from method FileClient.Release. +type FileReleaseResponse = generated.FileClientReleaseLeaseResponse + +// ShareAcquireResponse contains the response from method ShareClient.Acquire. +type ShareAcquireResponse = generated.ShareClientAcquireLeaseResponse + +// ShareBreakResponse contains the response from method ShareClient.Break. +type ShareBreakResponse = generated.ShareClientBreakLeaseResponse + +// ShareChangeResponse contains the response from method ShareClient.Change. +type ShareChangeResponse = generated.ShareClientChangeLeaseResponse + +// ShareReleaseResponse contains the response from method ShareClient.Release. +type ShareReleaseResponse = generated.ShareClientReleaseLeaseResponse + +// ShareRenewResponse contains the response from method ShareClient.Renew. +type ShareRenewResponse = generated.ShareClientRenewLeaseResponse diff --git a/sdk/storage/azfile/lease/share_client.go b/sdk/storage/azfile/lease/share_client.go new file mode 100644 index 000000000000..ff4db564c57f --- /dev/null +++ b/sdk/storage/azfile/lease/share_client.go @@ -0,0 +1,116 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package lease + +import ( + "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" +) + +// ShareClient provides lease functionality for the underlying share client. +type ShareClient struct { + shareClient *share.Client + leaseID *string +} + +// ShareClientOptions contains the optional values when creating a ShareClient. +type ShareClientOptions struct { + // LeaseID contains a caller-provided lease ID. + LeaseID *string +} + +// NewShareClient creates a share lease client for the provided share client. +// - client - an instance of a share client +// - options - client options; pass nil to accept the default values +func NewShareClient(client *share.Client, options *ShareClientOptions) (*ShareClient, error) { + var leaseID *string + if options != nil { + leaseID = options.LeaseID + } + + leaseID, err := shared.GenerateLeaseID(leaseID) + if err != nil { + return nil, err + } + + return &ShareClient{ + shareClient: client, + leaseID: leaseID, + }, nil +} + +func (s *ShareClient) generated() *generated.ShareClient { + return base.InnerClient((*base.Client[generated.ShareClient])(s.shareClient)) +} + +// LeaseID returns leaseID of the client. +func (s *ShareClient) LeaseID() *string { + return s.leaseID +} + +// Acquire operation can be used to request a new lease. +// The lease duration must be between 15 and 60 seconds, or infinite (-1). +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. 
+func (s *ShareClient) Acquire(ctx context.Context, duration int32, options *ShareAcquireOptions) (ShareAcquireResponse, error) { + opts := options.format(s.LeaseID()) + resp, err := s.generated().AcquireLease(ctx, duration, opts) + return resp, err +} + +// Break operation can be used to break the lease, if the file share has an active lease. Once a lease is broken, it cannot be renewed. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. +func (s *ShareClient) Break(ctx context.Context, options *ShareBreakOptions) (ShareBreakResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().BreakLease(ctx, opts, leaseAccessConditions) + return resp, err +} + +// Change operation can be used to change the lease ID of an active lease. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. +func (s *ShareClient) Change(ctx context.Context, proposedLeaseID string, options *ShareChangeOptions) (ShareChangeResponse, error) { + if s.LeaseID() == nil { + return ShareChangeResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format(&proposedLeaseID) + resp, err := s.generated().ChangeLease(ctx, *s.LeaseID(), opts) + + // If lease has been changed successfully, set the leaseID in client + if err == nil { + s.leaseID = &proposedLeaseID + } + + return resp, err +} + +// Release operation can be used to free the lease if it is no longer needed so that another client may immediately acquire a lease against the file share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. 
+func (s *ShareClient) Release(ctx context.Context, options *ShareReleaseOptions) (ShareReleaseResponse, error) { + if s.LeaseID() == nil { + return ShareReleaseResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format() + resp, err := s.generated().ReleaseLease(ctx, *s.LeaseID(), opts) + return resp, err +} + +// Renew operation can be used to renew an existing lease. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/lease-share. +func (s *ShareClient) Renew(ctx context.Context, options *ShareRenewOptions) (ShareRenewResponse, error) { + if s.LeaseID() == nil { + return ShareRenewResponse{}, errors.New("leaseID cannot be nil") + } + + opts := options.format() + resp, err := s.generated().RenewLease(ctx, *s.LeaseID(), opts) + return resp, err +} diff --git a/sdk/storage/azfile/log.go b/sdk/storage/azfile/log.go new file mode 100644 index 000000000000..f59215653531 --- /dev/null +++ b/sdk/storage/azfile/log.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azfile + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" +) + +const ( + // EventUpload is used for logging events related to upload operation. + EventUpload = exported.EventUpload +) diff --git a/sdk/storage/azfile/sas/account.go b/sdk/storage/azfile/sas/account.go new file mode 100644 index 000000000000..6b0c0067e811 --- /dev/null +++ b/sdk/storage/azfile/sas/account.go @@ -0,0 +1,183 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas +type AccountSignatureValues struct { + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() + IPRange IPRange `param:"sip"` + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() +} + +// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. 
+func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" { + return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + } + if v.Version == "" { + v.Version = Version + } + perms, err := parseAccountPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + + resources, err := parseAccountResourceTypes(v.ResourceTypes) + if err != nil { + return QueryParameters{}, err + } + v.ResourceTypes = resources.String() + + startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) + + stringToSign := strings.Join([]string{ + sharedKeyCredential.AccountName(), + v.Permissions, + "f", // file service + v.ResourceTypes, + startTime, + expiryTime, + v.IPRange.String(), + string(v.Protocol), + v.Version, + ""}, // That is right, the account SAS requires a terminating extra newline + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Account-specific SAS parameters + services: "f", // will always be "f" for Azure File + resourceTypes: v.ResourceTypes, + + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field. 
+type AccountPermissions struct { + Read, Write, Delete, List, Create bool +} + +// String produces the SAS permissions string for an Azure Storage account. +// Call this method to set AccountSignatureValues' Permissions field. +func (p *AccountPermissions) String() string { + var buffer bytes.Buffer + if p.Read { + buffer.WriteRune('r') + } + if p.Write { + buffer.WriteRune('w') + } + if p.Delete { + buffer.WriteRune('d') + } + if p.List { + buffer.WriteRune('l') + } + if p.Create { + buffer.WriteRune('c') + } + return buffer.String() +} + +// parseAccountPermissions initializes the AccountPermissions' fields from a string. +func parseAccountPermissions(s string) (AccountPermissions, error) { + p := AccountPermissions{} // Clear out the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + case 'c': + p.Create = true + default: + return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r) + } + } + return p, nil +} + +// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. +type AccountResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSignatureValues' ResourceTypes field. +func (rt *AccountResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. 
+func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) + } + } + return rt, nil +} diff --git a/sdk/storage/azfile/sas/account_test.go b/sdk/storage/azfile/sas/account_test.go new file mode 100644 index 000000000000..d22d645185ed --- /dev/null +++ b/sdk/storage/azfile/sas/account_test.go @@ -0,0 +1,124 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestAccountPermissions_String(t *testing.T) { + testdata := []struct { + input AccountPermissions + expected string + }{ + {input: AccountPermissions{Read: true}, expected: "r"}, + {input: AccountPermissions{Write: true}, expected: "w"}, + {input: AccountPermissions{Delete: true}, expected: "d"}, + {input: AccountPermissions{List: true}, expected: "l"}, + {input: AccountPermissions{Create: true}, expected: "c"}, + {input: AccountPermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + }, expected: "rwdlc"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestAccountPermissions_Parse(t *testing.T) { + testdata := []struct { + input string + expected AccountPermissions + }{ + {expected: AccountPermissions{Read: true}, input: "r"}, + {expected: AccountPermissions{Write: true}, input: "w"}, + {expected: AccountPermissions{Delete: true}, input: "d"}, + {expected: AccountPermissions{List: true}, input: "l"}, + {expected: AccountPermissions{Create: true}, input: "c"}, + {expected: AccountPermissions{ + Read: true, + 
Write: true, + Delete: true, + List: true, + Create: true, + }, input: "rwdlc"}, + {expected: AccountPermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + }, input: "rcdlw"}, + } + for _, c := range testdata { + permissions, err := parseAccountPermissions(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestAccountPermissions_ParseNegative(t *testing.T) { + _, err := parseAccountPermissions("rwldcz") // Here 'z' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "122") +} + +func TestAccountResourceTypes_String(t *testing.T) { + testdata := []struct { + input AccountResourceTypes + expected string + }{ + {input: AccountResourceTypes{Service: true}, expected: "s"}, + {input: AccountResourceTypes{Container: true}, expected: "c"}, + {input: AccountResourceTypes{Object: true}, expected: "o"}, + {input: AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }, expected: "sco"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestAccountResourceTypes_Parse(t *testing.T) { + testdata := []struct { + input string + expected AccountResourceTypes + }{ + {expected: AccountResourceTypes{Service: true}, input: "s"}, + {expected: AccountResourceTypes{Container: true}, input: "c"}, + {expected: AccountResourceTypes{Object: true}, input: "o"}, + {expected: AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }, input: "sco"}, + {expected: AccountResourceTypes{ + Service: true, + Container: true, + Object: true, + }, input: "osc"}, + } + for _, c := range testdata { + permissions, err := parseAccountResourceTypes(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestAccountResourceTypes_ParseNegative(t *testing.T) { + _, err := parseAccountResourceTypes("scoz") // Here 'z' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "122") +} 
diff --git a/sdk/storage/azfile/sas/query_params.go b/sdk/storage/azfile/sas/query_params.go new file mode 100644 index 000000000000..5bf5422d6082 --- /dev/null +++ b/sdk/storage/azfile/sas/query_params.go @@ -0,0 +1,339 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "errors" + "net" + "net/url" + "strings" + "time" +) + +// timeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time. +const ( + timeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601 + SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" +) + +var ( + // Version is the default version encoded in the SAS token. + Version = "2020-02-10" +) + +// TimeFormats ISO 8601 format. +// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. +var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", timeFormat, "2006-01-02T15:04Z", "2006-01-02"} + +// Protocol indicates the http/https. +type Protocol string + +const ( + // ProtocolHTTPS can be specified for a SAS protocol. + ProtocolHTTPS Protocol = "https" + + // ProtocolHTTPSandHTTP can be specified for a SAS protocol. + ProtocolHTTPSandHTTP Protocol = "https,http" +) + +// FormatTimesForSigning converts a time.Time to a SnapshotTimeFormat string suitable for a +// Field's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). 
+func formatTimesForSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = formatTimeWithDefaultFormat(&startTime)
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = formatTimeWithDefaultFormat(&expiryTime)
+	}
+	sh := ""
+	if !snapshotTime.IsZero() {
+		sh = snapshotTime.Format(SnapshotTimeFormat)
+	}
+	return ss, se, sh
+}
+
+// formatTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatTimeWithDefaultFormat(t *time.Time) string {
+	return formatTime(t, timeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatTime formats time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(timeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseTime tries to parse a SAS time string.
+func parseTime(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range timeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// IPRange represents a SAS IP range's start IP and (optionally) end IP.
+type IPRange struct {
+	Start net.IP // Not specified if length = 0
+	End   net.IP // Not specified if length = 0
+}
+
+// String returns a string representation of an IPRange.
+func (ipr *IPRange) String() string { + if len(ipr.Start) == 0 { + return "" + } + start := ipr.Start.String() + if len(ipr.End) == 0 { + return start + } + return start + "-" + ipr.End.String() +} + +// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + +// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters. +// You parse a map of query parameters into its fields by calling NewQueryParameters(). You add the components +// to a query parameter map by calling AddToValues(). +// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type. +// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues). +type QueryParameters struct { + // All members are immutable or values so copies of this struct are goroutine-safe. + version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol Protocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + shareSnapshotTime time.Time `param:"sharesnapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +// ShareSnapshotTime returns shareSnapshotTime. +func (p *QueryParameters) ShareSnapshotTime() time.Time { + return p.shareSnapshotTime +} + +// Version returns version. +func (p *QueryParameters) Version() string { + return p.version +} + +// Services returns services. 
+func (p *QueryParameters) Services() string { + return p.services +} + +// ResourceTypes returns resourceTypes. +func (p *QueryParameters) ResourceTypes() string { + return p.resourceTypes +} + +// Protocol returns protocol. +func (p *QueryParameters) Protocol() Protocol { + return p.protocol +} + +// StartTime returns startTime. +func (p *QueryParameters) StartTime() time.Time { + return p.startTime +} + +// ExpiryTime returns expiryTime. +func (p *QueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +// IPRange returns ipRange. +func (p *QueryParameters) IPRange() IPRange { + return p.ipRange +} + +// Identifier returns identifier. +func (p *QueryParameters) Identifier() string { + return p.identifier +} + +// Resource returns resource. +func (p *QueryParameters) Resource() string { + return p.resource +} + +// Permissions returns permissions. +func (p *QueryParameters) Permissions() string { + return p.permissions +} + +// Signature returns signature. +func (p *QueryParameters) Signature() string { + return p.signature +} + +// CacheControl returns cacheControl. +func (p *QueryParameters) CacheControl() string { + return p.cacheControl +} + +// ContentDisposition returns contentDisposition. +func (p *QueryParameters) ContentDisposition() string { + return p.contentDisposition +} + +// ContentEncoding returns contentEncoding. +func (p *QueryParameters) ContentEncoding() string { + return p.contentEncoding +} + +// ContentLanguage returns contentLanguage. +func (p *QueryParameters) ContentLanguage() string { + return p.contentLanguage +} + +// ContentType returns contentType. +func (p *QueryParameters) ContentType() string { + return p.contentType +} + +// Encode encodes the SAS query parameters into URL encoded form sorted by key. 
+func (p *QueryParameters) Encode() string {
+	v := url.Values{}
+
+	if p.version != "" {
+		v.Add("sv", p.version)
+	}
+	if p.services != "" {
+		v.Add("ss", p.services)
+	}
+	if p.resourceTypes != "" {
+		v.Add("srt", p.resourceTypes)
+	}
+	if p.protocol != "" {
+		v.Add("spr", string(p.protocol))
+	}
+	if !p.startTime.IsZero() {
+		v.Add("st", formatTime(&(p.startTime), p.stTimeFormat))
+	}
+	if !p.expiryTime.IsZero() {
+		v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat))
+	}
+	if len(p.ipRange.Start) > 0 {
+		v.Add("sip", p.ipRange.String())
+	}
+	if p.identifier != "" {
+		v.Add("si", p.identifier)
+	}
+	if p.resource != "" {
+		v.Add("sr", p.resource)
+	}
+	if p.permissions != "" {
+		v.Add("sp", p.permissions)
+	}
+	if p.signature != "" {
+		v.Add("sig", p.signature)
+	}
+	if p.cacheControl != "" {
+		v.Add("rscc", p.cacheControl)
+	}
+	if p.contentDisposition != "" {
+		v.Add("rscd", p.contentDisposition)
+	}
+	if p.contentEncoding != "" {
+		v.Add("rsce", p.contentEncoding)
+	}
+	if p.contentLanguage != "" {
+		v.Add("rscl", p.contentLanguage)
+	}
+	if p.contentType != "" {
+		v.Add("rsct", p.contentType)
+	}
+
+	return v.Encode()
+}
+
+// NewQueryParameters creates and initializes a QueryParameters object based on the
+// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
+func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters { + p := QueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = Protocol(val) + case "sharesnapshot": + p.shareSnapshotTime, _ = time.Parse(SnapshotTimeFormat, val) + case "st": + p.startTime, p.stTimeFormat, _ = parseTime(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseTime(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} diff --git a/sdk/storage/azfile/sas/query_params_test.go b/sdk/storage/azfile/sas/query_params_test.go new file mode 100644 index 000000000000..7d699f9c3396 --- /dev/null +++ b/sdk/storage/azfile/sas/query_params_test.go @@ -0,0 +1,211 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package sas + +import ( + "fmt" + "net" + "net/url" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestFormatTimesForSigning(t *testing.T) { + testdata := []struct { + inputStart time.Time + inputEnd time.Time + inputSnapshot time.Time + expectedStart string + expectedEnd string + expectedSnapshot string + }{ + {expectedStart: "", expectedEnd: "", expectedSnapshot: ""}, + {inputStart: time.Date(1955, 6, 25, 22, 15, 56, 345456, time.UTC), expectedStart: "1955-06-25T22:15:56Z", expectedEnd: "", expectedSnapshot: ""}, + {inputEnd: time.Date(2023, 4, 5, 8, 50, 27, 4500, time.UTC), expectedStart: "", expectedEnd: "2023-04-05T08:50:27Z", expectedSnapshot: ""}, + {inputSnapshot: time.Date(2021, 1, 5, 22, 15, 33, 1234879, time.UTC), expectedStart: "", expectedEnd: "", expectedSnapshot: "2021-01-05T22:15:33.0012348Z"}, + { + inputStart: time.Date(1955, 6, 25, 22, 15, 56, 345456, time.UTC), + inputEnd: time.Date(2023, 4, 5, 8, 50, 27, 4500, time.UTC), + inputSnapshot: time.Date(2021, 1, 5, 22, 15, 33, 1234879, time.UTC), + expectedStart: "1955-06-25T22:15:56Z", + expectedEnd: "2023-04-05T08:50:27Z", + expectedSnapshot: "2021-01-05T22:15:33.0012348Z", + }, + } + for _, c := range testdata { + start, end, ss := formatTimesForSigning(c.inputStart, c.inputEnd, c.inputSnapshot) + require.Equal(t, c.expectedStart, start) + require.Equal(t, c.expectedEnd, end) + require.Equal(t, c.expectedSnapshot, ss) + } +} + +func TestFormatTimeWithDefaultFormat(t *testing.T) { + testdata := []struct { + input time.Time + expectedTime string + }{ + {input: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), expectedTime: "1955-04-05T08:50:27Z"}, + {input: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), expectedTime: "1917-03-09T16:22:56Z"}, + {input: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), expectedTime: "2021-01-05T22:15:00Z"}, + {input: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), expectedTime: "2023-06-25T00:00:00Z"}, + } + for _, c := 
range testdata { + formattedTime := formatTimeWithDefaultFormat(&c.input) + require.Equal(t, c.expectedTime, formattedTime) + } +} + +func TestFormatTime(t *testing.T) { + testdata := []struct { + input time.Time + format string + expectedTime string + }{ + {input: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), format: "2006-01-02T15:04:05.0000000Z", expectedTime: "1955-04-05T08:50:27.0000045Z"}, + {input: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), format: "", expectedTime: "1955-04-05T08:50:27Z"}, + {input: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), format: "2006-01-02T15:04:05Z", expectedTime: "1917-03-09T16:22:56Z"}, + {input: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), format: "", expectedTime: "1917-03-09T16:22:56Z"}, + {input: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), format: "2006-01-02T15:04Z", expectedTime: "2021-01-05T22:15Z"}, + {input: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), format: "", expectedTime: "2021-01-05T22:15:00Z"}, + {input: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), format: "2006-01-02", expectedTime: "2023-06-25"}, + {input: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), format: "", expectedTime: "2023-06-25T00:00:00Z"}, + } + for _, c := range testdata { + formattedTime := formatTime(&c.input, c.format) + require.Equal(t, c.expectedTime, formattedTime) + } +} + +func TestParseTime(t *testing.T) { + testdata := []struct { + input string + expectedTime time.Time + expectedFormat string + }{ + {input: "1955-04-05T08:50:27.0000045Z", expectedTime: time.Date(1955, 4, 5, 8, 50, 27, 4500, time.UTC), expectedFormat: "2006-01-02T15:04:05.0000000Z"}, + {input: "1917-03-09T16:22:56Z", expectedTime: time.Date(1917, 3, 9, 16, 22, 56, 0, time.UTC), expectedFormat: "2006-01-02T15:04:05Z"}, + {input: "2021-01-05T22:15Z", expectedTime: time.Date(2021, 1, 5, 22, 15, 0, 0, time.UTC), expectedFormat: "2006-01-02T15:04Z"}, + {input: "2023-06-25", expectedTime: time.Date(2023, 6, 25, 0, 0, 0, 0, time.UTC), expectedFormat: 
"2006-01-02"}, + } + for _, c := range testdata { + parsedTime, format, err := parseTime(c.input) + require.Nil(t, err) + require.Equal(t, c.expectedTime, parsedTime) + require.Equal(t, c.expectedFormat, format) + } +} + +func TestParseTimeNegative(t *testing.T) { + _, _, err := parseTime("notatime") + require.Error(t, err, "fail to parse time with IOS 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details") +} + +func TestIPRange_String(t *testing.T) { + testdata := []struct { + inputStart net.IP + inputEnd net.IP + expected string + }{ + {expected: ""}, + {inputStart: net.IPv4(10, 255, 0, 0), expected: "10.255.0.0"}, + {inputStart: net.IPv4(10, 255, 0, 0), inputEnd: net.IPv4(10, 255, 0, 50), expected: "10.255.0.0-10.255.0.50"}, + } + for _, c := range testdata { + var ipRange IPRange + if c.inputStart != nil { + ipRange.Start = c.inputStart + } + if c.inputEnd != nil { + ipRange.End = c.inputEnd + } + require.Equal(t, c.expected, ipRange.String()) + } +} + +func TestSAS(t *testing.T) { + // Note: This is a totally invalid fake SAS, this is just testing our ability to parse different query parameters on a SAS + const sas = "sv=2019-12-12&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https,http&si=myIdentifier&ss=bf&srt=s&rscc=cc&rscd=cd&rsce=ce&rscl=cl&rsct=ct&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D" + _url := fmt.Sprintf("https://teststorageaccount.file.core.windows.net/testshare/testpath?%s", sas) + _uri, err := url.Parse(_url) + require.NoError(t, err) + sasQueryParams := NewQueryParameters(_uri.Query(), true) + validateSAS(t, sas, sasQueryParams) +} + +func validateSAS(t *testing.T, sas string, parameters QueryParameters) { + sasCompMap := make(map[string]string) + for _, sasComp := range strings.Split(sas, "&") { + comp := strings.Split(sasComp, "=") + sasCompMap[comp[0]] = comp[1] + } + + require.Equal(t, 
parameters.Version(), sasCompMap["sv"]) + require.Equal(t, parameters.Services(), sasCompMap["ss"]) + require.Equal(t, parameters.ResourceTypes(), sasCompMap["srt"]) + require.Equal(t, string(parameters.Protocol()), sasCompMap["spr"]) + if _, ok := sasCompMap["st"]; ok { + startTime, _, err := parseTime(sasCompMap["st"]) + require.NoError(t, err) + require.Equal(t, parameters.StartTime(), startTime) + } + if _, ok := sasCompMap["se"]; ok { + endTime, _, err := parseTime(sasCompMap["se"]) + require.NoError(t, err) + require.Equal(t, parameters.ExpiryTime(), endTime) + } + + if _, ok := sasCompMap["sharesnapshot"]; ok { + snapshotTime, _, err := parseTime(sasCompMap["sharesnapshot"]) + require.NoError(t, err) + require.Equal(t, parameters.ShareSnapshotTime(), snapshotTime) + } + ipRange := parameters.IPRange() + require.Equal(t, ipRange.String(), sasCompMap["sip"]) + require.Equal(t, parameters.Identifier(), sasCompMap["si"]) + require.Equal(t, parameters.Resource(), sasCompMap["sr"]) + require.Equal(t, parameters.Permissions(), sasCompMap["sp"]) + + sign, err := url.QueryUnescape(sasCompMap["sig"]) + require.NoError(t, err) + + require.Equal(t, parameters.Signature(), sign) + require.Equal(t, parameters.CacheControl(), sasCompMap["rscc"]) + require.Equal(t, parameters.ContentDisposition(), sasCompMap["rscd"]) + require.Equal(t, parameters.ContentEncoding(), sasCompMap["rsce"]) + require.Equal(t, parameters.ContentLanguage(), sasCompMap["rscl"]) + require.Equal(t, parameters.ContentType(), sasCompMap["rsct"]) +} + +func TestSASInvalidQueryParameter(t *testing.T) { + // Signature is invalid below + const sas = "sv=2019-12-12&signature=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D&sr=b" + _url := fmt.Sprintf("https://teststorageaccount.file.core.windows.net/testshare/testpath?%s", sas) + _uri, err := url.Parse(_url) + require.NoError(t, err) + NewQueryParameters(_uri.Query(), true) + // NewQueryParameters should not delete signature + require.Contains(t, 
_uri.Query(), "signature") +} + +func TestEncode(t *testing.T) { + // Note: This is a totally invalid fake SAS, this is just testing our ability to parse different query parameters on a SAS + expected := "rscc=cc&rscd=cd&rsce=ce&rscl=cl&rsct=ct&se=2222-03-09T01%3A42%3A34Z&si=myIdentifier&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D&sip=168.1.5.60-168.1.5.70&sp=rw&spr=https%2Chttp&sr=b&srt=sco&ss=bf&st=2111-01-09T01%3A42%3A34Z&sv=2019-12-12" + randomOrder := "se=2222-03-09T01:42:34.936Z&rsce=ce&ss=bf&si=myIdentifier&sip=168.1.5.60-168.1.5.70&rscc=cc&srt=sco&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D&rsct=ct&rscl=cl&sv=2019-12-12&sr=b&st=2111-01-09T01:42:34.936Z&rscd=cd&sp=rw&spr=https,http" + testdata := []string{expected, randomOrder} + + for _, sas := range testdata { + _url := fmt.Sprintf("https://teststorageaccount.file.core.windows.net/testshare/testpath?%s", sas) + _uri, err := url.Parse(_url) + require.NoError(t, err) + queryParams := NewQueryParameters(_uri.Query(), true) + require.Equal(t, expected, queryParams.Encode()) + } +} diff --git a/sdk/storage/azfile/sas/service.go b/sdk/storage/azfile/sas/service.go new file mode 100644 index 000000000000..50192f9ef58b --- /dev/null +++ b/sdk/storage/azfile/sas/service.go @@ -0,0 +1,227 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package sas + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" +) + +// SignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage file or share. 
+// For more information on creating service sas, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas +// User Delegation SAS not supported for files service +type SignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to Version + Protocol Protocol `param:"spr"` // See the Protocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + SnapshotTime time.Time + Permissions string `param:"sp"` // Create by initializing SharePermissions or FilePermissions and then call String() + IPRange IPRange `param:"sip"` + Identifier string `param:"si"` + ShareName string + FilePath string // Ex: "directory/FileName". Use "" to create a Share SAS and file path for File SAS. + CacheControl string // rscc + ContentDisposition string // rscd + ContentEncoding string // rsce + ContentLanguage string // rscl + ContentType string // rsct +} + +// SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. 
+func (v SignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { + if v.ExpiryTime.IsZero() || v.Permissions == "" { + return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") + } + + resource := "s" + if v.FilePath == "" { + //Make sure the permission characters are in the correct order + perms, err := parseSharePermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + resource = "f" + // Make sure the permission characters are in the correct order + perms, err := parseFilePermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + + if v.Version == "" { + v.Version = Version + } + startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(sharedKeyCredential.AccountName(), v.ShareName, v.FilePath), + v.Identifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign) + if err != nil { + return QueryParameters{}, err + } + + p := QueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Share/File-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: 
v.ContentType, + shareSnapshotTime: v.SnapshotTime, + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// getCanonicalName computes the canonical name for a share or file resource for SAS signing. +func getCanonicalName(account string, shareName string, filePath string) string { + // Share: "/file/account/sharename" + // File: "/file/account/sharename/filename" + // File: "/file/account/sharename/directoryname/filename" + elements := []string{"/file/", account, "/", shareName} + if filePath != "" { + dfp := strings.Replace(filePath, "\\", "/", -1) + if dfp[0] == '/' { + dfp = dfp[1:] + } + elements = append(elements, "/", dfp) + } + return strings.Join(elements, "") +} + +// SharePermissions type simplifies creating the permissions string for an Azure Storage share SAS. +// Initialize an instance of this type and then call its String method to set SignatureValues' Permissions field. +// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-share +type SharePermissions struct { + Read, Create, Write, Delete, List bool +} + +// String produces the SAS permissions string for an Azure Storage share. +// Call this method to set SignatureValues' Permissions field. +func (p *SharePermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// parseSharePermissions initializes SharePermissions' fields from a string. 
+func parseSharePermissions(s string) (SharePermissions, error) { + p := SharePermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return SharePermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +} + +// FilePermissions type simplifies creating the permissions string for an Azure Storage file SAS. +// Initialize an instance of this type and then call its String method to set SignatureValues' Permissions field. +// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-file +type FilePermissions struct { + Read, Create, Write, Delete bool +} + +// String produces the SAS permissions string for an Azure Storage file. +// Call this method to set SignatureValues' Permissions field. +func (p *FilePermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + return b.String() +} + +// parseFilePermissions initializes the FilePermissions' fields from a string. +func parseFilePermissions(s string) (FilePermissions, error) { + p := FilePermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + default: + return FilePermissions{}, fmt.Errorf("invalid permission: '%v'", r) + } + } + return p, nil +} diff --git a/sdk/storage/azfile/sas/service_test.go b/sdk/storage/azfile/sas/service_test.go new file mode 100644 index 000000000000..dd640be0e4fc --- /dev/null +++ b/sdk/storage/azfile/sas/service_test.go @@ -0,0 +1,147 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "github.com/stretchr/testify/require" + "testing" +) + +func TestSharePermissions_String(t *testing.T) { + testdata := []struct { + input SharePermissions + expected string + }{ + {input: SharePermissions{Read: true}, expected: "r"}, + {input: SharePermissions{Create: true}, expected: "c"}, + {input: SharePermissions{Write: true}, expected: "w"}, + {input: SharePermissions{Delete: true}, expected: "d"}, + {input: SharePermissions{List: true}, expected: "l"}, + {input: SharePermissions{ + Read: true, + Create: true, + Write: true, + Delete: true, + List: true, + }, expected: "rcwdl"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestSharePermissions_Parse(t *testing.T) { + testdata := []struct { + input string + expected SharePermissions + }{ + {expected: SharePermissions{Read: true}, input: "r"}, + {expected: SharePermissions{Create: true}, input: "c"}, + {expected: SharePermissions{Write: true}, input: "w"}, + {expected: SharePermissions{Delete: true}, input: "d"}, + {expected: SharePermissions{List: true}, input: "l"}, + {expected: SharePermissions{ + Read: true, + Create: true, + Write: true, + Delete: true, + List: true, + }, input: "rcwdl"}, + {expected: SharePermissions{ + Read: true, + Create: true, + Write: true, + Delete: true, + List: true, + }, input: "cwrdl"}, // Wrong order parses correctly + } + for _, c := range testdata { + permissions, err := parseSharePermissions(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestSharePermissions_ParseNegative(t *testing.T) { + _, err := parseSharePermissions("cwtrdl") // Here 't' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "116") +} + +func TestFilePermissions_String(t *testing.T) { + testdata := []struct { + input FilePermissions + expected string + }{ + 
{input: FilePermissions{Read: true}, expected: "r"}, + {input: FilePermissions{Create: true}, expected: "c"}, + {input: FilePermissions{Write: true}, expected: "w"}, + {input: FilePermissions{Delete: true}, expected: "d"}, + {input: FilePermissions{ + Read: true, + Create: true, + Write: true, + Delete: true, + }, expected: "rcwd"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, c.input.String()) + } +} + +func TestFilePermissions_Parse(t *testing.T) { + testdata := []struct { + expected FilePermissions + input string + }{ + {expected: FilePermissions{Read: true}, input: "r"}, + {expected: FilePermissions{Create: true}, input: "c"}, + {expected: FilePermissions{Write: true}, input: "w"}, + {expected: FilePermissions{Delete: true}, input: "d"}, + {expected: FilePermissions{ + Read: true, + Create: true, + Write: true, + Delete: true, + }, input: "rcwd"}, + {expected: FilePermissions{ + Read: true, + Create: true, + Write: true, + Delete: true, + }, input: "wcrd"}, // Wrong order parses correctly + } + for _, c := range testdata { + permissions, err := parseFilePermissions(c.input) + require.Nil(t, err) + require.Equal(t, c.expected, permissions) + } +} + +func TestFilePermissions_ParseNegative(t *testing.T) { + _, err := parseFilePermissions("wcrdf") // Here 'f' is invalid + require.NotNil(t, err) + require.Contains(t, err.Error(), "102") +} + +func TestGetCanonicalName(t *testing.T) { + testdata := []struct { + inputAccount string + inputShare string + inputFilePath string + expected string + }{ + {inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", expected: "/file/fakestorageaccount/fakestorageshare"}, + {inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", inputFilePath: "fakestoragefile", expected: "/file/fakestorageaccount/fakestorageshare/fakestoragefile"}, + {inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", inputFilePath: "fakestoragedirectory/fakestoragefile", expected: 
"/file/fakestorageaccount/fakestorageshare/fakestoragedirectory/fakestoragefile"}, + {inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", inputFilePath: "fakestoragedirectory\\fakestoragefile", expected: "/file/fakestorageaccount/fakestorageshare/fakestoragedirectory/fakestoragefile"}, + {inputAccount: "fakestorageaccount", inputShare: "fakestorageshare", inputFilePath: "fakestoragedirectory", expected: "/file/fakestorageaccount/fakestorageshare/fakestoragedirectory"}, + } + for _, c := range testdata { + require.Equal(t, c.expected, getCanonicalName(c.inputAccount, c.inputShare, c.inputFilePath)) + } +} diff --git a/sdk/storage/azfile/sas/url_parts.go b/sdk/storage/azfile/sas/url_parts.go new file mode 100644 index 000000000000..3f741c921fd3 --- /dev/null +++ b/sdk/storage/azfile/sas/url_parts.go @@ -0,0 +1,147 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "net/url" + "strings" +) + +const ( + shareSnapshot = "sharesnapshot" +) + +// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. +// Ex: "https://10.132.141.33/accountname/sharename" +type IPEndpointStyleInfo struct { + AccountName string // "" if not using IP endpoint style +} + +// URLParts object represents the components that make up an Azure Storage Share/Directory/File URL. You parse an +// existing URL into its parts by calling NewFileURLParts(). You construct a URL from parts by calling URL(). +// NOTE: Changing any SAS-related field requires computing a new SAS signature. 
+type URLParts struct { + Scheme string // Ex: "https://" + Host string // Ex: "account.share.core.windows.net", "10.132.141.33", "10.132.141.33:80" + IPEndpointStyleInfo IPEndpointStyleInfo // Useful Parts for IP endpoint style URL. + ShareName string // Share name, Ex: "myshare" + DirectoryOrFilePath string // Path of directory or file, Ex: "mydirectory/myfile" + ShareSnapshot string // IsZero is true if not a snapshot + SAS QueryParameters + UnparsedParams string +} + +// ParseURL parses a URL initializing URLParts' fields including any SAS-related & sharesnapshot query parameters. +// Any other query parameters remain in the UnparsedParams field. +func ParseURL(u string) (URLParts, error) { + uri, err := url.Parse(u) + if err != nil { + return URLParts{}, err + } + + up := URLParts{ + Scheme: uri.Scheme, + Host: uri.Host, + } + + if uri.Path != "" { + path := uri.Path + if path[0] == '/' { + path = path[1:] + } + if shared.IsIPEndpointStyle(up.Host) { + if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no share, path of directory or file + up.IPEndpointStyleInfo.AccountName = path + path = "" // no ShareName present in the URL so path should be empty + } else { + up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes + path = path[accountEndIndex+1:] + } + } + + shareEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) + if shareEndIndex == -1 { // Slash not found; path has share name & no path of directory or file + up.ShareName = path + } else { // Slash found; path has share name & path of directory or file + up.ShareName = path[:shareEndIndex] + up.DirectoryOrFilePath = path[shareEndIndex+1:] + } + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := uri.Query() + + up.ShareSnapshot = "" // Assume no snapshot + if snapshotStr, ok := 
caseInsensitiveValues(paramsMap).Get(shareSnapshot); ok { + up.ShareSnapshot = snapshotStr[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, shareSnapshot) + } + + up.SAS = NewQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up, nil +} + +// String returns a URL object whose fields are initialized from the URLParts fields. The URL's RawQuery +// field contains the SAS, snapshot, and unparsed query parameters. +func (up URLParts) String() string { + path := "" + // Concatenate account name for IP endpoint style URL + if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + path += "/" + up.IPEndpointStyleInfo.AccountName + } + // Concatenate share & path of directory or file (if they exist) + if up.ShareName != "" { + path += "/" + up.ShareName + if up.DirectoryOrFilePath != "" { + path += "/" + up.DirectoryOrFilePath + } + } + + rawQuery := up.UnparsedParams + + //If no snapshot is initially provided, fill it in from the SAS query properties to help the user + if up.ShareSnapshot == "" && !up.SAS.ShareSnapshotTime().IsZero() { + up.ShareSnapshot = up.SAS.ShareSnapshotTime().Format(SnapshotTimeFormat) + } + + // Concatenate share snapshot query parameter (if it exists) + if up.ShareSnapshot != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += shareSnapshot + "=" + up.ShareSnapshot + } + sas := up.SAS.Encode() + if sas != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += sas + } + u := url.URL{ + Scheme: up.Scheme, + Host: up.Host, + Path: path, + RawQuery: rawQuery, + } + return u.String() +} + +type caseInsensitiveValues url.Values // map[string][]string + +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} diff --git a/sdk/storage/azfile/sas/url_parts_test.go 
b/sdk/storage/azfile/sas/url_parts_test.go new file mode 100644 index 000000000000..21691e0a7ae7 --- /dev/null +++ b/sdk/storage/azfile/sas/url_parts_test.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package sas + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseURLIPStyle(t *testing.T) { + urlWithIP := "https://127.0.0.1:5000/fakestorageaccount" + fileURLParts, err := ParseURL(urlWithIP) + require.NoError(t, err) + require.Equal(t, fileURLParts.Scheme, "https") + require.Equal(t, fileURLParts.Host, "127.0.0.1:5000") + require.Equal(t, fileURLParts.IPEndpointStyleInfo.AccountName, "fakestorageaccount") + + urlWithIP = "https://127.0.0.1:5000/fakestorageaccount/fakeshare" + fileURLParts, err = ParseURL(urlWithIP) + require.NoError(t, err) + require.Equal(t, fileURLParts.Scheme, "https") + require.Equal(t, fileURLParts.Host, "127.0.0.1:5000") + require.Equal(t, fileURLParts.IPEndpointStyleInfo.AccountName, "fakestorageaccount") + require.Equal(t, fileURLParts.ShareName, "fakeshare") + + urlWithIP = "https://127.0.0.1:5000/fakestorageaccount/fakeshare/fakefile" + fileURLParts, err = ParseURL(urlWithIP) + require.NoError(t, err) + require.Equal(t, fileURLParts.Scheme, "https") + require.Equal(t, fileURLParts.Host, "127.0.0.1:5000") + require.Equal(t, fileURLParts.IPEndpointStyleInfo.AccountName, "fakestorageaccount") + require.Equal(t, fileURLParts.ShareName, "fakeshare") + require.Equal(t, fileURLParts.DirectoryOrFilePath, "fakefile") +} + +func TestParseURL(t *testing.T) { + testStorageAccount := "fakestorageaccount" + host := fmt.Sprintf("%s.file.core.windows.net", testStorageAccount) + testShare := "fakeshare" + fileNames := []string{"/._.TESTT.txt", "/.gitignore/dummyfile1"} + + const sasStr = 
"sv=2019-12-12&sr=b&st=2111-01-09T01:42:34.936Z&se=2222-03-09T01:42:34.936Z&sp=rw&sip=168.1.5.60-168.1.5.70&spr=https,http&si=myIdentifier&ss=bf&srt=s&sig=clNxbtnkKSHw7f3KMEVVc4agaszoRFdbZr%2FWBmPNsrw%3D" + + for _, fileName := range fileNames { + sasURL := fmt.Sprintf("https://%s.file.core.windows.net/%s%s?%s", testStorageAccount, testShare, fileName, sasStr) + fileURLParts, err := ParseURL(sasURL) + require.NoError(t, err) + + require.Equal(t, fileURLParts.Scheme, "https") + require.Equal(t, fileURLParts.Host, host) + require.Equal(t, fileURLParts.ShareName, testShare) + + validateSAS(t, sasStr, fileURLParts.SAS) + } + + for _, fileName := range fileNames { + shareSnapshotID := "2011-03-09T01:42:34Z" + sasWithShareSnapshotID := "?sharesnapshot=" + shareSnapshotID + "&" + sasStr + urlWithShareSnapshot := fmt.Sprintf("https://%s.file.core.windows.net/%s%s%s", testStorageAccount, testShare, fileName, sasWithShareSnapshotID) + fileURLParts, err := ParseURL(urlWithShareSnapshot) + require.NoError(t, err) + + require.Equal(t, fileURLParts.Scheme, "https") + require.Equal(t, fileURLParts.Host, host) + require.Equal(t, fileURLParts.ShareName, testShare) + + validateSAS(t, sasStr, fileURLParts.SAS) + } +} diff --git a/sdk/storage/azfile/service/client.go b/sdk/storage/azfile/service/client.go new file mode 100644 index 000000000000..89bf5f02c5a3 --- /dev/null +++ b/sdk/storage/azfile/service/client.go @@ -0,0 +1,214 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package service + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "net/http" + "strings" + "time" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure File Storage service allowing you to manipulate file shares. +type Client base.Client[generated.ServiceClient] + +// NewClientWithNoCredential creates an instance of Client with the specified values. +// This is used to anonymously access a storage account or with a shared access signature (SAS) token. +// - serviceURL - the URL of the storage account e.g. https://<account>.file.core.windows.net/?<sas token> +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - serviceURL - the URL of the storage account e.g. 
https://.file.core.windows.net/ +// - cred - a SharedKeyCredential created with the matching storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. +// - connectionString - a connection string for the desired storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (s *Client) generated() *generated.ServiceClient { + return base.InnerClient((*base.Client[generated.ServiceClient])(s)) +} + +func (s *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.ServiceClient])(s)) +} + +// URL returns the URL endpoint used by the Client object. 
+func (s *Client) URL() string { + return s.generated().Endpoint() +} + +// NewShareClient creates a new share.Client object by concatenating shareName to the end of this Client's URL. +// The new share.Client uses the same request policy pipeline as the Client. +func (s *Client) NewShareClient(shareName string) *share.Client { + shareURL := runtime.JoinPaths(s.generated().Endpoint(), shareName) + return (*share.Client)(base.NewShareClient(shareURL, s.generated().Pipeline(), s.sharedKey())) +} + +// CreateShare is a lifecycle method to create a new share under the specified account. +// If the share with the same name already exists, a ResourceExistsError will be raised. +// This method returns a client with which to interact with the newly created share. +// For more information see, https://learn.microsoft.com/en-us/rest/api/storageservices/create-share. +func (s *Client) CreateShare(ctx context.Context, shareName string, options *CreateShareOptions) (CreateShareResponse, error) { + shareClient := s.NewShareClient(shareName) + createShareResp, err := shareClient.Create(ctx, options) + return createShareResp, err +} + +// DeleteShare is a lifecycle method that marks the specified share for deletion. +// The share and any files contained within it are later deleted during garbage collection. +// If the share is not found, a ResourceNotFoundError will be raised. +// For more information see, https://learn.microsoft.com/en-us/rest/api/storageservices/delete-share. +func (s *Client) DeleteShare(ctx context.Context, shareName string, options *DeleteShareOptions) (DeleteShareResponse, error) { + shareClient := s.NewShareClient(shareName) + deleteShareResp, err := shareClient.Delete(ctx, options) + return deleteShareResp, err +} + +// RestoreShare restores a soft-deleted share. +// Operation will only be successful if used within the specified number of days set in the delete retention policy. 
+// For more information see, https://learn.microsoft.com/en-us/rest/api/storageservices/restore-share. +func (s *Client) RestoreShare(ctx context.Context, deletedShareName string, deletedShareVersion string, options *RestoreShareOptions) (RestoreShareResponse, error) { + shareClient := s.NewShareClient(deletedShareName) + createShareResp, err := shareClient.Restore(ctx, deletedShareVersion, options) + return createShareResp, err +} + +// GetProperties operation gets the properties of a storage account's File service. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-file-service-properties. +func (s *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts := options.format() + resp, err := s.generated().GetProperties(ctx, opts) + return resp, err +} + +// SetProperties operation sets properties for a storage account's File service endpoint. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-file-service-properties. +func (s *Client) SetProperties(ctx context.Context, options *SetPropertiesOptions) (SetPropertiesResponse, error) { + svcProperties, o := options.format() + resp, err := s.generated().SetProperties(ctx, svcProperties, o) + return resp, err +} + +// NewListSharesPager operation returns a pager of the shares under the specified account. 
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/list-shares +func (s *Client) NewListSharesPager(options *ListSharesOptions) *runtime.Pager[ListSharesSegmentResponse] { + listOptions := generated.ServiceClientListSharesSegmentOptions{} + if options != nil { + if options.Include.Deleted { + listOptions.Include = append(listOptions.Include, ListSharesIncludeTypeDeleted) + } + if options.Include.Metadata { + listOptions.Include = append(listOptions.Include, ListSharesIncludeTypeMetadata) + } + if options.Include.Snapshots { + listOptions.Include = append(listOptions.Include, ListSharesIncludeTypeSnapshots) + } + listOptions.Marker = options.Marker + listOptions.Maxresults = options.MaxResults + listOptions.Prefix = options.Prefix + } + + return runtime.NewPager(runtime.PagingHandler[ListSharesSegmentResponse]{ + More: func(page ListSharesSegmentResponse) bool { + return page.NextMarker != nil && len(*page.NextMarker) > 0 + }, + Fetcher: func(ctx context.Context, page *ListSharesSegmentResponse) (ListSharesSegmentResponse, error) { + var req *policy.Request + var err error + if page == nil { + req, err = s.generated().ListSharesSegmentCreateRequest(ctx, &listOptions) + } else { + listOptions.Marker = page.NextMarker + req, err = s.generated().ListSharesSegmentCreateRequest(ctx, &listOptions) + } + if err != nil { + return ListSharesSegmentResponse{}, err + } + resp, err := s.generated().Pipeline().Do(req) + if err != nil { + return ListSharesSegmentResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ListSharesSegmentResponse{}, runtime.NewResponseError(resp) + } + return s.generated().ListSharesSegmentHandleResponse(resp) + }, + }) +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
+func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if s.sharedKey() == nil { + return "", fileerror.MissingSharedKeyCredential + } + st := o.format() + qps, err := sas.AccountSignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + Permissions: permissions.String(), + ResourceTypes: resources.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(s.sharedKey()) + if err != nil { + return "", err + } + + endpoint := s.URL() + if !strings.HasSuffix(endpoint, "/") { + // add a trailing slash to be consistent with the portal + endpoint += "/" + } + endpoint += "?" + qps.Encode() + + return endpoint, nil +} diff --git a/sdk/storage/azfile/service/client_test.go b/sdk/storage/azfile/service/client_test.go new file mode 100644 index 000000000000..d9c3642c4628 --- /dev/null +++ b/sdk/storage/azfile/service/client_test.go @@ -0,0 +1,454 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package service_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "strconv" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running service Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &ServiceRecordedTestsSuite{}) + suite.Run(t, &ServiceUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &ServiceRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &ServiceRecordedTestsSuite{}) + } +} + +func (s *ServiceRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *ServiceRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *ServiceUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (s *ServiceUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type ServiceRecordedTestsSuite struct { + suite.Suite +} + +type ServiceUnrecordedTestsSuite struct { + suite.Suite +} + +func (s *ServiceRecordedTestsSuite) TestAccountNewServiceURLValidName() { + _require := require.New(s.T()) + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(s.T(), 
testcommon.TestAccountDefault, nil) + _require.NoError(err) + + correctURL := "https://" + accountName + "." + testcommon.DefaultFileEndpointSuffix + _require.Equal(svcClient.URL(), correctURL) +} + +func (s *ServiceRecordedTestsSuite) TestAccountNewShareURLValidName() { + _require := require.New(s.T()) + testName := s.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + _require.NoError(err) + + correctURL := "https://" + accountName + "." + testcommon.DefaultFileEndpointSuffix + shareName + _require.Equal(shareClient.URL(), correctURL) +} + +func (s *ServiceRecordedTestsSuite) TestServiceClientFromConnectionString() { + _require := require.New(s.T()) + + svcClient, err := testcommon.GetServiceClientFromConnectionString(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + resp, err := svcClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) +} + +func (s *ServiceRecordedTestsSuite) TestAccountProperties() { + _require := require.New(s.T()) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + setPropertiesOptions := &service.SetPropertiesOptions{ + HourMetrics: &service.Metrics{ + Enabled: to.Ptr(true), + IncludeAPIs: to.Ptr(true), + RetentionPolicy: &service.RetentionPolicy{ + Enabled: to.Ptr(true), + Days: to.Ptr(int32(2)), + }, + }, + MinuteMetrics: &service.Metrics{ + Enabled: to.Ptr(true), + IncludeAPIs: to.Ptr(false), + RetentionPolicy: &service.RetentionPolicy{ + Enabled: to.Ptr(true), + Days: to.Ptr(int32(2)), + }, + }, + CORS: []*service.CORSRule{ + { + AllowedOrigins: to.Ptr("*"), + AllowedMethods: to.Ptr("PUT"), 
+ AllowedHeaders: to.Ptr("x-ms-client-request-id"), + ExposedHeaders: to.Ptr("x-ms-*"), + MaxAgeInSeconds: to.Ptr(int32(2)), + }, + }, + } + + setPropsResp, err := svcClient.SetProperties(context.Background(), setPropertiesOptions) + _require.NoError(err) + _require.NotNil(setPropsResp.RequestID) + + time.Sleep(time.Second * 30) + + getPropsResp, err := svcClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(getPropsResp.RequestID) + _require.EqualValues(getPropsResp.HourMetrics.RetentionPolicy.Enabled, setPropertiesOptions.HourMetrics.RetentionPolicy.Enabled) + _require.EqualValues(getPropsResp.HourMetrics.RetentionPolicy.Days, setPropertiesOptions.HourMetrics.RetentionPolicy.Days) + _require.EqualValues(getPropsResp.MinuteMetrics.RetentionPolicy.Enabled, setPropertiesOptions.MinuteMetrics.RetentionPolicy.Enabled) + _require.EqualValues(getPropsResp.MinuteMetrics.RetentionPolicy.Days, setPropertiesOptions.MinuteMetrics.RetentionPolicy.Days) + _require.EqualValues(len(getPropsResp.CORS), len(setPropertiesOptions.CORS)) +} + +func (s *ServiceRecordedTestsSuite) TestAccountHourMetrics() { + _require := require.New(s.T()) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + setPropertiesOptions := &service.SetPropertiesOptions{ + HourMetrics: &service.Metrics{ + Enabled: to.Ptr(true), + IncludeAPIs: to.Ptr(true), + RetentionPolicy: &service.RetentionPolicy{ + Enabled: to.Ptr(true), + Days: to.Ptr(int32(5)), + }, + }, + } + _, err = svcClient.SetProperties(context.Background(), setPropertiesOptions) + _require.NoError(err) +} + +func (s *ServiceRecordedTestsSuite) TestAccountListSharesNonDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + mySharePrefix := testcommon.GenerateEntityName(testName) + pager := 
svcClient.NewListSharesPager(&service.ListSharesOptions{ + Prefix: to.Ptr(mySharePrefix), + }) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.NotNil(resp.Prefix) + _require.Equal(*resp.Prefix, mySharePrefix) + _require.NotNil(resp.ServiceEndpoint) + _require.NotNil(resp.Version) + _require.Len(resp.Shares, 0) + } + + shareClients := map[string]*share.Client{} + for i := 0; i < 4; i++ { + shareName := mySharePrefix + "share" + strconv.Itoa(i) + shareClients[shareName] = testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClients[shareName]) + + _, err := shareClients[shareName].SetMetadata(context.Background(), &share.SetMetadataOptions{ + Metadata: testcommon.BasicMetadata, + }) + _require.NoError(err) + } + + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Metadata: true, Snapshots: true}, + Prefix: to.Ptr(mySharePrefix), + MaxResults: to.Ptr(int32(2)), + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + if len(resp.Shares) > 0 { + _require.Len(resp.Shares, 2) + } + for _, shareItem := range resp.Shares { + _require.NotNil(shareItem.Properties) + _require.NotNil(shareItem.Properties.LastModified) + _require.NotNil(shareItem.Properties.ETag) + _require.EqualValues(shareItem.Metadata, testcommon.BasicMetadata) + } + } +} + +func (s *ServiceUnrecordedTestsSuite) TestSASServiceClientRestoreShare() { + _require := require.New(s.T()) + testName := s.T().Name() + cred, _ := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + + serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/", cred.AccountName()), cred, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + + // Note: Always set all permissions, 
services, types to true to ensure order of string formed is correct. + resources := sas.AccountResourceTypes{ + Object: true, + Service: true, + Container: true, + } + permissions := sas.AccountPermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + sasUrl, err := serviceClient.GetSASURL(resources, permissions, expiry, nil) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClientNoCredential(s.T(), sasUrl, nil) + _require.NoError(err) + + resp, err := svcClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp.RequestID) + + // create share using account SAS + _, err = svcClient.CreateShare(context.Background(), shareName, nil) + _require.NoError(err) + + defer func() { + _, err := svcClient.DeleteShare(context.Background(), shareName, nil) + _require.NoError(err) + }() + + _, err = svcClient.DeleteShare(context.Background(), shareName, nil) + _require.NoError(err) + + // wait for share deletion + time.Sleep(60 * time.Second) + + sharesCnt := 0 + shareVersion := "" + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Deleted: true}, + Prefix: &shareName, + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + for _, s := range resp.Shares { + if s.Deleted != nil && *s.Deleted { + _require.NotNil(s.Version) + shareVersion = *s.Version + } else { + sharesCnt++ + } + } + } + + _require.Equal(sharesCnt, 0) + _require.NotEmpty(shareVersion) + + restoreResp, err := svcClient.RestoreShare(context.Background(), shareName, shareVersion, nil) + _require.NoError(err) + _require.NotNil(restoreResp.RequestID) + + sharesCnt = 0 + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Prefix: &shareName, + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + sharesCnt += len(resp.Shares) 
+ } + _require.Equal(sharesCnt, 1) +} + +func (s *ServiceRecordedTestsSuite) TestSASServiceClientNoKey() { + _require := require.New(s.T()) + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + serviceClient, err := service.NewClientWithNoCredential(fmt.Sprintf("https://%s.file.core.windows.net/", accountName), nil) + _require.NoError(err) + resources := sas.AccountResourceTypes{ + Object: true, + Service: true, + Container: true, + } + permissions := sas.AccountPermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + + expiry := time.Now().Add(time.Hour) + _, err = serviceClient.GetSASURL(resources, permissions, expiry, nil) + _require.Equal(err, fileerror.MissingSharedKeyCredential) +} + +func (s *ServiceRecordedTestsSuite) TestSASServiceClientSignNegative() { + _require := require.New(s.T()) + accountName, accountKey := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + _require.Greater(len(accountKey), 0) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + _require.NoError(err) + + serviceClient, err := service.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/", accountName), cred, nil) + _require.NoError(err) + resources := sas.AccountResourceTypes{ + Object: true, + Service: true, + Container: true, + } + permissions := sas.AccountPermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Time{} + + // zero expiry time + _, err = serviceClient.GetSASURL(resources, permissions, expiry, &service.GetSASURLOptions{StartTime: to.Ptr(time.Now())}) + _require.Equal(err.Error(), "account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + + // zero start and expiry time + _, err = serviceClient.GetSASURL(resources, permissions, expiry, &service.GetSASURLOptions{}) 
+ _require.Equal(err.Error(), "account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + + // empty permissions + _, err = serviceClient.GetSASURL(sas.AccountResourceTypes{}, sas.AccountPermissions{}, expiry, nil) + _require.Equal(err.Error(), "account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") +} + +func (s *ServiceRecordedTestsSuite) TestServiceSetPropertiesDefault() { + _require := require.New(s.T()) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + _, err = svcClient.SetProperties(context.Background(), nil) + _require.NoError(err) +} + +func (s *ServiceRecordedTestsSuite) TestServiceCreateDeleteRestoreShare() { + _require := require.New(s.T()) + testName := s.T().Name() + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + + _, err = svcClient.CreateShare(context.Background(), shareName, nil) + _require.NoError(err) + + defer func() { + _, err := svcClient.DeleteShare(context.Background(), shareName, nil) + _require.NoError(err) + }() + + _, err = svcClient.DeleteShare(context.Background(), shareName, nil) + _require.NoError(err) + + // wait for share deletion + time.Sleep(60 * time.Second) + + sharesCnt := 0 + shareVersion := "" + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Deleted: true}, + Prefix: &shareName, + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + for _, s := range resp.Shares { + if s.Deleted != nil && *s.Deleted { + _require.NotNil(s.Version) + shareVersion = *s.Version + } else { + sharesCnt++ + } + } + } + + _require.Equal(sharesCnt, 0) + _require.NotEmpty(shareVersion) + + restoreResp, err := svcClient.RestoreShare(context.Background(), shareName, 
shareVersion, nil) + _require.NoError(err) + _require.NotNil(restoreResp.RequestID) + + sharesCnt = 0 + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Prefix: &shareName, + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + sharesCnt += len(resp.Shares) + } + _require.Equal(sharesCnt, 1) +} diff --git a/sdk/storage/azfile/service/constants.go b/sdk/storage/azfile/service/constants.go new file mode 100644 index 000000000000..a936067376b4 --- /dev/null +++ b/sdk/storage/azfile/service/constants.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// ListSharesIncludeType defines values for ListSharesIncludeType +type ListSharesIncludeType = generated.ListSharesIncludeType + +const ( + ListSharesIncludeTypeSnapshots ListSharesIncludeType = generated.ListSharesIncludeTypeSnapshots + ListSharesIncludeTypeMetadata ListSharesIncludeType = generated.ListSharesIncludeTypeMetadata + ListSharesIncludeTypeDeleted ListSharesIncludeType = generated.ListSharesIncludeTypeDeleted +) + +// PossibleListSharesIncludeTypeValues returns the possible values for the ListSharesIncludeType const type. +func PossibleListSharesIncludeTypeValues() []ListSharesIncludeType { + return generated.PossibleListSharesIncludeTypeValues() +} + +// ShareRootSquash defines values for the root squashing behavior on the share when NFS is enabled. If it's not specified, the default is NoRootSquash. 
+type ShareRootSquash = generated.ShareRootSquash + +const ( + RootSquashNoRootSquash ShareRootSquash = generated.ShareRootSquashNoRootSquash + RootSquashRootSquash ShareRootSquash = generated.ShareRootSquashRootSquash + RootSquashAllSquash ShareRootSquash = generated.ShareRootSquashAllSquash +) + +// PossibleShareRootSquashValues returns the possible values for the RootSquash const type. +func PossibleShareRootSquashValues() []ShareRootSquash { + return generated.PossibleShareRootSquashValues() +} diff --git a/sdk/storage/azfile/service/examples_test.go b/sdk/storage/azfile/service/examples_test.go new file mode 100644 index 000000000000..bc7a2e4cd6dd --- /dev/null +++ b/sdk/storage/azfile/service/examples_test.go @@ -0,0 +1,308 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "log" + "os" + "time" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +func Example_service_Client_NewClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + fmt.Println(svcClient.URL()) +} + +func Example_service_NewClientFromConnectionString() { + // 
Your connection string can be obtained from the Azure Portal. + connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + + svcClient, err := service.NewClientFromConnectionString(connectionString, nil) + handleError(err) + + fmt.Println(svcClient.URL()) +} + +func Example_service_Client_NewShareClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareName := "testShare" + shareClient := svcClient.NewShareClient(shareName) + + fmt.Println(shareClient.URL()) +} + +func Example_service_Client_CreateShare() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareName := "testShare" + _, err = svcClient.CreateShare(context.TODO(), shareName, nil) + handleError(err) + fmt.Println("Share created") +} + +func Example_service_Client_DeleteShare() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME 
could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareName := "testShare" + _, err = svcClient.DeleteShare(context.TODO(), shareName, nil) + handleError(err) + fmt.Println("Share deleted") +} + +func Example_service_Client_RestoreShare() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + // get share version for restore operation + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Deleted: true}, // Include deleted shares in the result + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + handleError(err) + for _, s := range resp.Shares { + if s.Deleted != nil && *s.Deleted { + _, err = svcClient.RestoreShare(context.TODO(), *s.Name, *s.Version, nil) + handleError(err) + } + } + } +} + +func Example_service_Client_GetProperties() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL 
:= fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + _, err = svcClient.GetProperties(context.TODO(), nil) + handleError(err) +} + +func Example_service_Client_SetProperties() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + setPropertiesOpts := service.SetPropertiesOptions{ + HourMetrics: &service.Metrics{ + Enabled: to.Ptr(true), + IncludeAPIs: to.Ptr(true), + RetentionPolicy: &service.RetentionPolicy{ + Enabled: to.Ptr(true), + Days: to.Ptr(int32(2)), + }, + }, + MinuteMetrics: &service.Metrics{ + Enabled: to.Ptr(true), + IncludeAPIs: to.Ptr(false), + RetentionPolicy: &service.RetentionPolicy{ + Enabled: to.Ptr(true), + Days: to.Ptr(int32(2)), + }, + }, + CORS: []*service.CORSRule{ + { + AllowedOrigins: to.Ptr("*"), + AllowedMethods: to.Ptr("PUT"), + AllowedHeaders: to.Ptr("x-ms-client-request-id"), + ExposedHeaders: to.Ptr("x-ms-*"), + MaxAgeInSeconds: to.Ptr(int32(2)), + }, + }, + } + _, err = svcClient.SetProperties(context.TODO(), &setPropertiesOpts) + handleError(err) +} + +func Example_service_Client_ListShares() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY 
could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + pager := svcClient.NewListSharesPager(nil) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + handleError(err) + for _, s := range resp.Shares { + fmt.Println(*s.Name) + } + } +} + +func Example_service_Client_GetSASURL() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + resources := sas.AccountResourceTypes{ + Object: true, + Service: true, + Container: true, + } + permissions := sas.AccountPermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + sasUrl, err := svcClient.GetSASURL(resources, permissions, expiry, nil) + handleError(err) + + fmt.Println("SAS URL: ", sasUrl) + + svcSASClient, err := service.NewClientWithNoCredential(sasUrl, nil) + handleError(err) + + _, err = svcSASClient.GetProperties(context.TODO(), nil) + handleError(err) +} diff --git a/sdk/storage/azfile/service/models.go b/sdk/storage/azfile/service/models.go new file mode 100644 index 000000000000..0a529af87248 --- /dev/null +++ b/sdk/storage/azfile/service/models.go @@ -0,0 +1,171 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "time" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// CreateShareOptions contains the optional parameters for the share.Client.Create method. +type CreateShareOptions = share.CreateOptions + +// DeleteShareOptions contains the optional parameters for the share.Client.Delete method. +type DeleteShareOptions = share.DeleteOptions + +// RestoreShareOptions contains the optional parameters for the share.Client.Restore method. 
+type RestoreShareOptions = share.RestoreOptions + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions provides set of options for Client.GetProperties +type GetPropertiesOptions struct { + // placeholder for future options +} + +func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions provides set of options for Client.SetProperties +type SetPropertiesOptions struct { + // The set of CORS rules. + CORS []*CORSRule + + // A summary of request statistics grouped by API in hourly aggregates for files. + HourMetrics *Metrics + + // A summary of request statistics grouped by API in minute aggregates for files. + MinuteMetrics *Metrics + + // Protocol settings + Protocol *ProtocolSettings +} + +func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *generated.ServiceClientSetPropertiesOptions) { + if o == nil { + return generated.StorageServiceProperties{}, nil + } + + formatMetrics(o.HourMetrics) + formatMetrics(o.MinuteMetrics) + + return generated.StorageServiceProperties{ + CORS: o.CORS, + HourMetrics: o.HourMetrics, + MinuteMetrics: o.MinuteMetrics, + Protocol: o.Protocol, + }, nil +} + +// update version of Storage Analytics to configure. Use 1.0 for this value. +func formatMetrics(m *Metrics) { + if m == nil { + return + } + + m.Version = to.Ptr(shared.StorageAnalyticsVersion) +} + +// StorageServiceProperties - Storage service properties. +type StorageServiceProperties = generated.StorageServiceProperties + +// CORSRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in +// another domain. 
Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain. +type CORSRule = generated.CORSRule + +// Metrics - Storage Analytics metrics for file service. +type Metrics = generated.Metrics + +// RetentionPolicy - The retention policy. +type RetentionPolicy = generated.RetentionPolicy + +// ProtocolSettings - Protocol settings +type ProtocolSettings = generated.ProtocolSettings + +// SMBSettings - Settings for SMB protocol. +type SMBSettings = generated.SMBSettings + +// SMBMultichannel - Settings for SMB multichannel +type SMBMultichannel = generated.SMBMultichannel + +// --------------------------------------------------------------------------------------------------------------------- + +// ListSharesOptions contains the optional parameters for the Client.NewListSharesPager method. +type ListSharesOptions struct { + // Include this parameter to specify one or more datasets to include in the responseBody. + Include ListSharesInclude + + // A string value that identifies the portion of the list to be returned with the next list operation. The operation returns + // a marker value within the responseBody body if the list returned was not complete. + // The marker value may then be used in a subsequent call to request the next set of list items. The marker value is opaque + // to the client. + Marker *string + + // Specifies the maximum number of entries to return. If the request does not specify maxresults, or specifies a value greater + // than 5,000, the server will return up to 5,000 items. + MaxResults *int32 + + // Filters the results to return only entries whose name begins with the specified prefix. + Prefix *string +} + +// ListSharesInclude indicates what additional information the service should return with each share. 
+type ListSharesInclude struct { + // Tells the service whether to return metadata for each share. + Metadata bool + + // Tells the service whether to return soft-deleted shares. + Deleted bool + + // Tells the service whether to return share snapshots. + Snapshots bool +} + +// Share - A listed Azure Storage share item. +type Share = generated.Share + +// ShareProperties - Properties of a share. +type ShareProperties = generated.ShareProperties + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} diff --git a/sdk/storage/azfile/service/responses.go b/sdk/storage/azfile/service/responses.go new file mode 100644 index 000000000000..fad91de63547 --- /dev/null +++ b/sdk/storage/azfile/service/responses.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package service + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// CreateShareResponse contains the response from method share.Client.Create. +type CreateShareResponse = generated.ShareClientCreateResponse + +// DeleteShareResponse contains the response from method share.Client.Delete. +type DeleteShareResponse = generated.ShareClientDeleteResponse + +// RestoreShareResponse contains the response from method share.Client.Restore. +type RestoreShareResponse = generated.ShareClientRestoreResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. 
+type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. +type SetPropertiesResponse = generated.ServiceClientSetPropertiesResponse + +// ListSharesSegmentResponse contains the response from method Client.NewListSharesPager. +type ListSharesSegmentResponse = generated.ServiceClientListSharesSegmentResponse + +// ListSharesResponse - An enumeration of shares. +type ListSharesResponse = generated.ListSharesResponse diff --git a/sdk/storage/azfile/share/client.go b/sdk/storage/azfile/share/client.go new file mode 100644 index 000000000000..aac826a8a6c1 --- /dev/null +++ b/sdk/storage/azfile/share/client.go @@ -0,0 +1,258 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package share + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "net/url" + "time" +) + +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions base.ClientOptions + +// Client represents a URL to the Azure Storage share allowing you to manipulate its directories and files. +type Client base.Client[generated.ShareClient] + +// NewClientWithNoCredential creates an instance of Client with the specified values. 
+// This is used to anonymously access a share or with a shared access signature (SAS) token. +// - shareURL - the URL of the share e.g. https://.file.core.windows.net/share? +// - options - client options; pass nil to accept the default values +func NewClientWithNoCredential(shareURL string, options *ClientOptions) (*Client, error) { + conOptions := shared.GetClientOptions(options) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewShareClient(shareURL, pl, nil)), nil +} + +// NewClientWithSharedKeyCredential creates an instance of Client with the specified values. +// - shareURL - the URL of the share e.g. https://.file.core.windows.net/share +// - cred - a SharedKeyCredential created with the matching share's storage account and access key +// - options - client options; pass nil to accept the default values +func NewClientWithSharedKeyCredential(shareURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { + authPolicy := exported.NewSharedKeyCredPolicy(cred) + conOptions := shared.GetClientOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + + return (*Client)(base.NewShareClient(shareURL, pl, cred)), nil +} + +// NewClientFromConnectionString creates an instance of Client with the specified values. 
+// - connectionString - a connection string for the desired storage account +// - shareName - the name of the share within the storage account +// - options - client options; pass nil to accept the default values +func NewClientFromConnectionString(connectionString string, shareName string, options *ClientOptions) (*Client, error) { + parsed, err := shared.ParseConnectionString(connectionString) + if err != nil { + return nil, err + } + parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, shareName) + + if parsed.AccountKey != "" && parsed.AccountName != "" { + credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey) + if err != nil { + return nil, err + } + return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options) + } + + return NewClientWithNoCredential(parsed.ServiceURL, options) +} + +func (s *Client) generated() *generated.ShareClient { + return base.InnerClient((*base.Client[generated.ShareClient])(s)) +} + +func (s *Client) sharedKey() *SharedKeyCredential { + return base.SharedKey((*base.Client[generated.ShareClient])(s)) +} + +// URL returns the URL endpoint used by the Client object. +func (s *Client) URL() string { + return s.generated().Endpoint() +} + +// NewDirectoryClient creates a new directory.Client object by concatenating directoryName to the end of this Client's URL. +// The new directory.Client uses the same request policy pipeline as the Client. +func (s *Client) NewDirectoryClient(directoryName string) *directory.Client { + directoryName = url.PathEscape(directoryName) + directoryURL := runtime.JoinPaths(s.URL(), directoryName) + return (*directory.Client)(base.NewDirectoryClient(directoryURL, s.generated().Pipeline(), s.sharedKey())) +} + +// NewRootDirectoryClient creates a new directory.Client object for the root of the share using the Client's URL. +// The new directory.Client uses the same request policy pipeline as the Client. 
+func (s *Client) NewRootDirectoryClient() *directory.Client { + rootDirURL := s.URL() + return (*directory.Client)(base.NewDirectoryClient(rootDirURL, s.generated().Pipeline(), s.sharedKey())) +} + +// WithSnapshot creates a new Client object identical to the source but with the specified share snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base share. +func (s *Client) WithSnapshot(shareSnapshot string) (*Client, error) { + p, err := sas.ParseURL(s.URL()) + if err != nil { + return nil, err + } + p.ShareSnapshot = shareSnapshot + + return (*Client)(base.NewShareClient(p.String(), s.generated().Pipeline(), s.sharedKey())), nil +} + +// Create operation creates a new share within a storage account. If a share with the same name already exists, the operation fails. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-share. +func (s *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) { + opts := options.format() + resp, err := s.generated().Create(ctx, opts) + return resp, err +} + +// Delete operation marks the specified share for deletion. The share and any files contained within it are later deleted during garbage collection. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/delete-share. +func (s *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().Delete(ctx, opts, leaseAccessConditions) + return resp, err +} + +// Restore operation restores a share that had previously been soft-deleted. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/restore-share. 
+func (s *Client) Restore(ctx context.Context, deletedShareVersion string, options *RestoreOptions) (RestoreResponse, error) { + urlParts, err := sas.ParseURL(s.URL()) + if err != nil { + return RestoreResponse{}, err + } + + opts := &generated.ShareClientRestoreOptions{ + DeletedShareName: &urlParts.ShareName, + DeletedShareVersion: &deletedShareVersion, + } + resp, err := s.generated().Restore(ctx, opts) + return resp, err +} + +// GetProperties operation returns all user-defined metadata and system properties for the specified share or share snapshot. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-share-properties. +func (s *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().GetProperties(ctx, opts, leaseAccessConditions) + return resp, err +} + +// SetProperties operation sets properties for the specified share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-share-properties. +func (s *Client) SetProperties(ctx context.Context, options *SetPropertiesOptions) (SetPropertiesResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().SetProperties(ctx, opts, leaseAccessConditions) + return resp, err +} + +// CreateSnapshot operation creates a read-only snapshot of a share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/snapshot-share. +func (s *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) { + opts := options.format() + resp, err := s.generated().CreateSnapshot(ctx, opts) + return resp, err +} + +// GetAccessPolicy operation returns information about stored access policies specified on the share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-share-acl. 
+func (s *Client) GetAccessPolicy(ctx context.Context, options *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().GetAccessPolicy(ctx, opts, leaseAccessConditions) + return resp, err +} + +// SetAccessPolicy operation sets a stored access policy for use with shared access signatures. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-share-acl. +func (s *Client) SetAccessPolicy(ctx context.Context, options *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) { + opts, acl, leaseAccessConditions, err := options.format() + if err != nil { + return SetAccessPolicyResponse{}, err + } + + resp, err := s.generated().SetAccessPolicy(ctx, acl, opts, leaseAccessConditions) + return resp, err +} + +// CreatePermission operation creates a permission (a security descriptor) at the share level. +// The created security descriptor can be used for the files and directories in the share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/create-permission. +func (s *Client) CreatePermission(ctx context.Context, sharePermission string, options *CreatePermissionOptions) (CreatePermissionResponse, error) { + permission, opts := options.format(sharePermission) + resp, err := s.generated().CreatePermission(ctx, permission, opts) + return resp, err +} + +// GetPermission operation gets the SDDL permission string from the service using a known permission key. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-permission. 
+func (s *Client) GetPermission(ctx context.Context, filePermissionKey string, options *GetPermissionOptions) (GetPermissionResponse, error) { + opts := options.format() + resp, err := s.generated().GetPermission(ctx, filePermissionKey, opts) + return resp, err +} + +// SetMetadata operation sets one or more user-defined name-value pairs for the specified share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/set-share-metadata. +func (s *Client) SetMetadata(ctx context.Context, options *SetMetadataOptions) (SetMetadataResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().SetMetadata(ctx, opts, leaseAccessConditions) + return resp, err +} + +// GetStatistics operation retrieves statistics related to the share. +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-share-stats. +func (s *Client) GetStatistics(ctx context.Context, options *GetStatisticsOptions) (GetStatisticsResponse, error) { + opts, leaseAccessConditions := options.format() + resp, err := s.generated().GetStatistics(ctx, opts, leaseAccessConditions) + return resp, err +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at share. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. 
+func (s *Client) GetSASURL(permissions sas.SharePermissions, expiry time.Time, o *GetSASURLOptions) (string, error) { + if s.sharedKey() == nil { + return "", fileerror.MissingSharedKeyCredential + } + st := o.format() + + urlParts, err := sas.ParseURL(s.URL()) + if err != nil { + return "", err + } + + t, err := time.Parse(sas.SnapshotTimeFormat, urlParts.ShareSnapshot) + if err != nil { + t = time.Time{} + } + + qps, err := sas.SignatureValues{ + Version: sas.Version, + Protocol: sas.ProtocolHTTPS, + ShareName: urlParts.ShareName, + SnapshotTime: t, + Permissions: permissions.String(), + StartTime: st, + ExpiryTime: expiry.UTC(), + }.SignWithSharedKey(s.sharedKey()) + if err != nil { + return "", err + } + + endpoint := s.URL() + "?" + qps.Encode() + + return endpoint, nil +} diff --git a/sdk/storage/azfile/share/client_test.go b/sdk/storage/azfile/share/client_test.go new file mode 100644 index 000000000000..44940a537d27 --- /dev/null +++ b/sdk/storage/azfile/share/client_test.go @@ -0,0 +1,1460 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package share_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "strconv" + "testing" + "time" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running share Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &ShareRecordedTestsSuite{}) + suite.Run(t, &ShareUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &ShareRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &ShareRecordedTestsSuite{}) + } +} + +func (s *ShareRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *ShareRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *ShareUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (s *ShareUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type ShareRecordedTestsSuite struct { + suite.Suite +} + +type ShareUnrecordedTestsSuite struct { + suite.Suite +} + +func (s *ShareRecordedTestsSuite) TestShareCreateRootDirectoryURL() { + _require := require.New(s.T()) + testName := s.T().Name() + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := testcommon.CreateNewShare(context.Background(), _require, 
testcommon.GenerateShareName(testName), svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + rootDirClient := shareClient.NewRootDirectoryClient() + _require.Equal(shareClient.URL(), rootDirClient.URL()) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateDirectoryURL() { + _require := require.New(s.T()) + testName := s.T().Name() + + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName, dirName := testcommon.GenerateShareName(testName), testcommon.GenerateDirectoryName(testName) + shareClient := svcClient.NewShareClient(shareName) + _require.NoError(err) + dirClient := shareClient.NewDirectoryClient(dirName) + _require.NoError(err) + + correctURL := "https://" + accountName + ".file.core.windows.net/" + shareName + "/" + dirName + _require.Equal(dirClient.URL(), correctURL) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateUsingSharedKey() { + _require := require.New(s.T()) + testName := s.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareURL := "https://" + cred.AccountName() + ".file.core.windows.net/" + shareName + options := &share.ClientOptions{} + testcommon.SetClientOptions(s.T(), &options.ClientOptions) + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, options) + _require.NoError(err) + + resp, err := shareClient.Create(context.Background(), nil) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateUsingConnectionString() { + _require := require.New(s.T()) + testName := s.T().Name() + + 
connString, err := testcommon.GetGenericConnectionString(testcommon.TestAccountDefault) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + options := &share.ClientOptions{} + testcommon.SetClientOptions(s.T(), &options.ClientOptions) + shareClient, err := share.NewClientFromConnectionString(*connString, shareName, options) + _require.NoError(err) + + resp, err := shareClient.Create(context.Background(), nil) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _require.NoError(err) + _require.NotNil(resp.ETag) + _require.NotNil(resp.RequestID) +} + +func (s *ShareUnrecordedTestsSuite) TestShareClientUsingSAS() { + _require := require.New(s.T()) + testName := s.T().Name() + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + permissions := sas.SharePermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + + shareSASURL, err := shareClient.GetSASURL(permissions, expiry, nil) + _require.NoError(err) + + shareSASClient, err := share.NewClientWithNoCredential(shareSASURL, nil) + _require.NoError(err) + + _, err = shareSASClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.AuthorizationFailure) + + dirName1 := testcommon.GenerateDirectoryName(testName) + "1" + _, err = shareSASClient.NewDirectoryClient(dirName1).Create(context.Background(), nil) + _require.NoError(err) + + dirName2 := testcommon.GenerateDirectoryName(testName) + "2" + _, err = shareSASClient.NewDirectoryClient(dirName2).Create(context.Background(), nil) + _require.NoError(err) + + fileName1 := 
testcommon.GenerateFileName(testName) + "1" + _, err = shareSASClient.NewRootDirectoryClient().NewFileClient(fileName1).Create(context.Background(), 1024, nil) + _require.NoError(err) + + fileName2 := testcommon.GenerateFileName(testName) + "2" + _, err = shareSASClient.NewDirectoryClient(dirName2).NewFileClient(fileName2).Create(context.Background(), 1024, nil) + _require.NoError(err) + + dirCtr, fileCtr := 0, 0 + pager := shareSASClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + dirCtr += len(resp.Segment.Directories) + fileCtr += len(resp.Segment.Files) + } + _require.Equal(dirCtr, 2) + _require.Equal(fileCtr, 1) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateDeleteNonDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + quota := int32(1000) + + cResp, err := shareClient.Create(context.Background(), &share.CreateOptions{ + AccessTier: to.Ptr(share.AccessTierCool), + Quota: to.Ptr(quota), + Metadata: testcommon.BasicMetadata}) + + _require.NoError(err) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.ETag) + _require.NotNil(cResp.LastModified) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.Version) + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Prefix: to.Ptr(shareName), + Include: service.ListSharesInclude{Metadata: true}, + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(resp.Shares, 1) + _require.Equal(*resp.Shares[0].Name, shareName) + _require.NotNil(resp.Shares[0].Metadata) + _require.EqualValues(resp.Shares[0].Metadata, testcommon.BasicMetadata) + 
_require.Equal(*resp.Shares[0].Properties.AccessTier, string(share.AccessTierCool)) + _require.Equal(*resp.Shares[0].Properties.Quota, quota) + } + + dResp, err := shareClient.Delete(context.Background(), nil) + _require.NoError(err) + _require.NotNil(dResp.Date) + _require.NotNil(dResp.RequestID) + _require.NotNil(dResp.Version) + + pager1 := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Prefix: to.Ptr(shareName), + Include: service.ListSharesInclude{Metadata: true}, + }) + for pager1.More() { + resp, err := pager1.NextPage(context.Background()) + _require.NoError(err) + _require.Len(resp.Shares, 0) + } +} + +func (s *ShareRecordedTestsSuite) TestShareCreateNilMetadata() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + _, err = shareClient.Create(context.Background(), nil) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + _require.NoError(err) + + response, err := shareClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Len(response.Metadata, 0) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateNegativeInvalidName() { + _require := require.New(s.T()) + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareClient := svcClient.NewShareClient("foo bar") + + _, err = shareClient.Create(context.Background(), nil) + + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidResourceName) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateNegativeInvalidMetadata() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := 
testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + _, err = shareClient.Create(context.Background(), &share.CreateOptions{ + Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}, + Quota: to.Ptr(int32(0)), + }) + _require.Error(err) +} + +func (s *ShareRecordedTestsSuite) TestShareDeleteNegativeNonExistent() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := svcClient.NewShareClient(shareName) + + _, err = shareClient.Delete(context.Background(), nil) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetPropertiesNonDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + newQuota := int32(1234) + + sResp, err := shareClient.SetProperties(context.Background(), &share.SetPropertiesOptions{ + Quota: to.Ptr(newQuota), + AccessTier: to.Ptr(share.AccessTierHot), + }) + _require.NoError(err) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + _require.Equal(sResp.Date.IsZero(), false) + + props, err := shareClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(props.ETag) + _require.Equal(props.LastModified.IsZero(), false) + _require.NotNil(props.RequestID) + _require.NotNil(props.Version) + _require.Equal(props.Date.IsZero(), false) + 
_require.Equal(*props.Quota, newQuota) + _require.Equal(*props.AccessTier, string(share.AccessTierHot)) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetPropertiesDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + sResp, err := shareClient.SetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + _require.Equal(sResp.Date.IsZero(), false) + + props, err := shareClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.NotNil(props.ETag) + _require.Equal(props.LastModified.IsZero(), false) + _require.NotNil(props.RequestID) + _require.NotNil(props.Version) + _require.Equal(props.Date.IsZero(), false) + _require.Greater(*props.Quota, int32(0)) // When using service default quota, it could be any value +} + +func (s *ShareRecordedTestsSuite) TestShareSetQuotaNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _, err = shareClient.SetProperties(context.Background(), &share.SetPropertiesOptions{Quota: to.Ptr(int32(-1))}) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidHeaderValue) +} + +func (s 
*ShareRecordedTestsSuite) TestShareGetPropertiesNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestSharePutAndGetPermission() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + // Create a permission and check that it's not empty. + createResp, err := shareClient.CreatePermission(context.Background(), testcommon.SampleSDDL, nil) + _require.NoError(err) + _require.NotEqual(*createResp.FilePermissionKey, "") + + getResp, err := shareClient.GetPermission(context.Background(), *createResp.FilePermissionKey, nil) + _require.NoError(err) + // Rather than checking against the original, we check for emptiness, as Azure Files has set a nil-ness flag on SACLs + // and converted our well-known SID. 
+ /* + Expected :string = "O:S-1-5-32-548G:S-1-5-21-397955417-626881126-188441444-512D:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)" + Actual :string = "O:AOG:S-1-5-21-397955417-626881126-188441444-512D:(A;;CCDCLCSWRPWPRCWDWOGA;;;S-1-0-0)S:NO_ACCESS_CONTROL" + */ + _require.NotNil(getResp.Permission) + _require.NotEmpty(*getResp.Permission) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetAccessPolicyNonDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + now := currTime.UTC().Truncate(10000 * time.Millisecond) // Enough resolution + expiryTIme := now.Add(5 * time.Minute).UTC() + pS := share.AccessPolicyPermission{ + Read: true, + Write: true, + Create: true, + Delete: true, + List: true, + } + pS2 := &share.AccessPolicyPermission{} + err = pS2.Parse("ldcwr") + _require.NoError(err) + _require.EqualValues(*pS2, pS) + + permission := pS.String() + permissions := []*share.SignedIdentifier{ + { + ID: to.Ptr("MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI="), + AccessPolicy: &share.AccessPolicy{ + Start: &now, + Expiry: &expiryTIme, + Permission: &permission, + }, + }} + + sResp, err := shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + 
_require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Len(gResp.SignedIdentifiers, 1) + _require.EqualValues(*(gResp.SignedIdentifiers[0]), *permissions[0]) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetAccessPolicyNonDefaultMultiple() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + now := currTime.UTC().Truncate(10000 * time.Millisecond) // Enough resolution + expiryTIme := now.Add(5 * time.Minute).UTC() + permission := share.AccessPolicyPermission{ + Read: true, + Write: true, + }.String() + + permissions := []*share.SignedIdentifier{ + { + ID: to.Ptr("MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI="), + AccessPolicy: &share.AccessPolicy{ + Start: &now, + Expiry: &expiryTIme, + Permission: &permission, + }, + }, + { + ID: to.Ptr("2"), + AccessPolicy: &share.AccessPolicy{ + Start: &now, + Expiry: &expiryTIme, + Permission: &permission, + }, + }} + + sResp, err := shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) 
+ _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Len(gResp.SignedIdentifiers, 2) + _require.EqualValues(gResp.SignedIdentifiers[0], permissions[0]) + _require.EqualValues(gResp.SignedIdentifiers[1], permissions[1]) +} + +func (s *ShareRecordedTestsSuite) TestShareSetAccessPolicyMoreThanFive() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + now := currTime.UTC().Truncate(10000 * time.Millisecond) // Enough resolution + expiryTIme := now.Add(5 * time.Minute).UTC() + permission := share.AccessPolicyPermission{ + Read: true, + Create: true, + Write: true, + Delete: true, + List: true, + }.String() + + var permissions []*share.SignedIdentifier + for i := 0; i <= len(permission); i++ { + p := permission + if i < len(permission) { + p = string(permission[i]) + } + permissions = append(permissions, &share.SignedIdentifier{ + ID: to.Ptr(fmt.Sprintf("%v", i)), + AccessPolicy: &share.AccessPolicy{ + Start: &now, + Expiry: &expiryTIme, + Permission: &p, + }, + }) + } + _require.Len(permissions, 6) + + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidXMLDocument) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetAccessPolicyDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, 
nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + sResp, err := shareClient.SetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Len(gResp.SignedIdentifiers, 0) +} + +func (s *ShareRecordedTestsSuite) TestShareGetAccessPolicyNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.GetAccessPolicy(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestShareSetAccessPolicyNonDefaultDeleteAndModifyACL() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Thu Mar 30 20:00:00 GMT 
2023") + _require.NoError(err) + start := currTime.UTC().Truncate(10000 * time.Millisecond) + expiry := start.Add(5 * time.Minute).UTC() + accessPermission := share.AccessPolicyPermission{List: true}.String() + permissions := make([]*share.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + permissions[i] = &share.SignedIdentifier{ + ID: to.Ptr("000" + strconv.Itoa(i)), + AccessPolicy: &share.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &accessPermission, + }, + } + } + + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + + resp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.SignedIdentifiers, len(permissions)) + _require.EqualValues(resp.SignedIdentifiers, permissions) + + permissions = resp.SignedIdentifiers[:1] // Delete the second policy by removing it from the slice + permissions[0].ID = to.Ptr("0004") // Modify the remaining policy which is at index 0 in the new slice + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + + resp, err = shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.SignedIdentifiers, 1) + _require.EqualValues(resp.SignedIdentifiers, permissions) +} + +func (s *ShareRecordedTestsSuite) TestShareSetAccessPolicyDeleteAllPolicies() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + 
start := currTime.UTC() + expiry := start.Add(5 * time.Minute).UTC() + accessPermission := share.AccessPolicyPermission{List: true}.String() + permissions := make([]*share.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + permissions[i] = &share.SignedIdentifier{ + ID: to.Ptr("000" + strconv.Itoa(i)), + AccessPolicy: &share.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &accessPermission, + }, + } + } + + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + + resp1, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp1.SignedIdentifiers, 2) + + _, err = shareClient.SetAccessPolicy(context.Background(), nil) + _require.NoError(err) + + resp2, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp2.SignedIdentifiers, 0) +} + +func (s *ShareRecordedTestsSuite) TestShareSetPermissionsNegativeInvalidPolicyTimes() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + // Swap start and expiry + currTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + expiry := currTime.UTC() + start := expiry.Add(5 * time.Minute).UTC() + accessPermission := share.AccessPolicyPermission{List: true}.String() + permissions := make([]*share.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + permissions[i] = &share.SignedIdentifier{ + ID: to.Ptr("000" + strconv.Itoa(i)), + AccessPolicy: &share.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &accessPermission, + }, + } + } + + 
_, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.NoError(err) + + resp, err := shareClient.GetAccessPolicy(context.Background(), nil) + _require.NoError(err) + _require.Len(resp.SignedIdentifiers, len(permissions)) + _require.EqualValues(resp.SignedIdentifiers, permissions) +} + +// SignedIdentifier ID too long +func (s *ShareRecordedTestsSuite) TestShareSetPermissionsNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + id := "" + for i := 0; i < 65; i++ { + id += "a" + } + currTime, err := time.Parse(time.UnixDate, "Wed Mar 29 20:00:00 GMT 2023") + _require.NoError(err) + expiry := currTime.UTC() + start := expiry.Add(5 * time.Minute).UTC() + accessPermission := share.AccessPolicyPermission{List: true}.String() + permissions := make([]*share.SignedIdentifier, 2) + for i := 0; i < 2; i++ { + permissions[i] = &share.SignedIdentifier{ + ID: to.Ptr(id), + AccessPolicy: &share.AccessPolicy{ + Start: &start, + Expiry: &expiry, + Permission: &accessPermission, + }, + } + } + + _, err = shareClient.SetAccessPolicy(context.Background(), &share.SetAccessPolicyOptions{ + ShareACL: permissions, + }) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.InvalidXMLDocument) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetMetadataDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + sResp, err := shareClient.SetMetadata(context.Background(), &share.SetMetadataOptions{ + Metadata: map[string]*string{}, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Len(gResp.Metadata, 0) +} + +func (s *ShareRecordedTestsSuite) TestShareGetSetMetadataNonDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + sResp, err := shareClient.SetMetadata(context.Background(), &share.SetMetadataOptions{ + Metadata: md, + }) + _require.NoError(err) + _require.Equal(sResp.Date.IsZero(), false) + _require.NotNil(sResp.ETag) + _require.Equal(sResp.LastModified.IsZero(), false) + _require.NotNil(sResp.RequestID) + _require.NotNil(sResp.Version) + + gResp, err := shareClient.GetProperties(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + _require.NotNil(gResp.ETag) + _require.Equal(gResp.LastModified.IsZero(), false) + _require.NotNil(gResp.RequestID) + 
_require.NotNil(gResp.Version) + _require.EqualValues(gResp.Metadata, md) +} + +func (s *ShareRecordedTestsSuite) TestShareSetMetadataNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + md := map[string]*string{ + "!@#$%^&*()": to.Ptr("!@#$%^&*()"), + } + _, err = shareClient.SetMetadata(context.Background(), &share.SetMetadataOptions{ + Metadata: md, + }) + _require.Error(err) +} + +func (s *ShareRecordedTestsSuite) TestShareGetStats() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + newQuota := int32(300) + + // In order to test and get LastModified property. + _, err = shareClient.SetProperties(context.Background(), &share.SetPropertiesOptions{Quota: to.Ptr(newQuota)}) + _require.NoError(err) + + gResp, err := shareClient.GetStatistics(context.Background(), nil) + _require.NoError(err) + _require.Equal(gResp.Date.IsZero(), false) + // _require.NotEqual(*gResp.ETag, "") // TODO: The ETag would be "" + // _require.Equal(gResp.LastModified.IsZero(), false) // TODO: Even share is once updated, no LastModified would be returned. 
+ _require.NotNil(gResp.RequestID) + _require.NotNil(gResp.Version) + _require.Equal(*gResp.ShareUsageBytes, int64(0)) +} + +func (s *ShareRecordedTestsSuite) TestShareGetStatsNegative() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.GetStatistics(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestSetAndGetStatistics() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.Create(context.Background(), &share.CreateOptions{Quota: to.Ptr(int32(1024))}) + _require.NoError(err) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + dirClient := shareClient.NewDirectoryClient("testdir") + _, err = dirClient.Create(context.Background(), nil) + _require.NoError(err) + + fileClient := dirClient.NewFileClient("testfile") + _, err = fileClient.Create(context.Background(), int64(1024*1024*1024*1024), nil) + _require.NoError(err) + + getStats, err := shareClient.GetStatistics(context.Background(), nil) + _require.NoError(err) + _require.Equal(*getStats.ShareUsageBytes, int64(1024*1024*1024*1024)) +} + +func deleteShare(ctx context.Context, _require *require.Assertions, shareClient *share.Client, o *share.DeleteOptions) { + _, err := shareClient.Delete(ctx, o) + _require.NoError(err) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateSnapshotNonDefault() { + _require := require.New(s.T()) + testName 
:= s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + cResp, err := shareClient.CreateSnapshot(context.Background(), &share.CreateSnapshotOptions{Metadata: testcommon.BasicMetadata}) + _require.NoError(err) + _require.Equal(cResp.Date.IsZero(), false) + _require.NotNil(cResp.ETag) + _require.NotEqual(*cResp.ETag, "") + _require.Equal(cResp.LastModified.IsZero(), false) + _require.NotNil(cResp.RequestID) + _require.NotNil(cResp.Version) + _require.NotNil(cResp.Snapshot) + _require.NotEqual(*cResp.Snapshot, "") + + cSnapshot := *cResp.Snapshot + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Metadata: true, Snapshots: true}, + Prefix: &shareName, + }) + + foundSnapshot := false + for pager.More() { + lResp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(lResp.Shares, 2) + + for _, s := range lResp.Shares { + if s.Snapshot != nil { + foundSnapshot = true + _require.Equal(*s.Snapshot, cSnapshot) + _require.NotNil(s.Metadata) + _require.EqualValues(s.Metadata, testcommon.BasicMetadata) + } else { + _require.Len(s.Metadata, 0) + } + } + } + _require.True(foundSnapshot) +} + +func (s *ShareUnrecordedTestsSuite) TestShareCreateSnapshotDefault() { + _require := require.New(s.T()) + testName := s.T().Name() + + cred, err := testcommon.GetGenericSharedKeyCredential(testcommon.TestAccountDefault) + _require.NoError(err) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := 
svcClient.NewShareClient(shareName) + + _, err = shareClient.Create(context.Background(), nil) + _require.NoError(err) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + // create a file in the base share. + dirClient := shareClient.NewRootDirectoryClient() + _require.NoError(err) + + fClient := dirClient.NewFileClient("myfile") + _, err = fClient.Create(context.Background(), 0, nil) + _require.NoError(err) + + // Create share snapshot, the snapshot contains the create file. + snapshotShare, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + + // Delete file in base share. + _, err = fClient.Delete(context.Background(), nil) + _require.NoError(err) + + // To produce a share SAS (as opposed to a file SAS), assign to FilePermissions using + // ShareSASPermissions and make sure the DirectoryAndFilePath field is "" (the default). + perms := sas.SharePermissions{Read: true, Write: true} + + // Restore file from share snapshot. + // Create a SAS. + sasQueryParams, err := sas.SignatureValues{ + Protocol: sas.ProtocolHTTPS, // Users MUST use HTTPS (not HTTP) + ExpiryTime: time.Now().UTC().Add(48 * time.Hour), // 48-hours before expiration + ShareName: shareName, + Permissions: perms.String(), + }.SignWithSharedKey(cred) + _require.NoError(err) + + // Build a file snapshot URL. + fileParts, err := sas.ParseURL(fClient.URL()) + _require.NoError(err) + fileParts.ShareSnapshot = *snapshotShare.Snapshot + fileParts.SAS = sasQueryParams + sourceURL := fileParts.String() + + // Before restore + _, err = fClient.GetProperties(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ResourceNotFound) + + // Do restore. 
+ _, err = fClient.StartCopyFromURL(context.Background(), sourceURL, nil) + _require.NoError(err) + + time.Sleep(2 * time.Second) + + // After restore + _, err = fClient.GetProperties(context.Background(), nil) + _require.NoError(err) + + _, err = shareClient.Delete(context.Background(), &share.DeleteOptions{ + ShareSnapshot: snapshotShare.Snapshot, + }) + _require.NoError(err) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateSnapshotNegativeShareNotExist() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.GetShareClient(shareName, svcClient) + + _, err = shareClient.CreateSnapshot(context.Background(), &share.CreateSnapshotOptions{Metadata: map[string]*string{}}) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareNotFound) +} + +func (s *ShareRecordedTestsSuite) TestShareDeleteSnapshot() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + resp1, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp1.Snapshot) + _require.NotEmpty(*resp1.Snapshot) + + resp2, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + _require.NotNil(resp2.Snapshot) + _require.NotEmpty(*resp2.Snapshot) + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + 
Prefix: &shareName, + }) + + snapshotsCtr := 0 + for pager.More() { + lResp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(lResp.Shares, 3) // 2 snapshots and 1 share + + for _, s := range lResp.Shares { + if s.Snapshot != nil { + snapshotsCtr++ + } + } + } + _require.Equal(snapshotsCtr, 2) + + snapClient, err := shareClient.WithSnapshot(*resp1.Snapshot) + _require.NoError(err) + + _, err = snapClient.Delete(context.Background(), nil) + _require.NoError(err) + + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + Prefix: &shareName, + }) + + snapshotsCtr = 0 + for pager.More() { + lResp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(lResp.Shares, 2) + + for _, s := range lResp.Shares { + if s.Snapshot != nil { + snapshotsCtr++ + _require.Equal(*s.Snapshot, *resp2.Snapshot) + } + } + } + _require.Equal(snapshotsCtr, 1) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateSnapshotNegativeMetadataInvalid() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _, err = shareClient.CreateSnapshot(context.Background(), &share.CreateSnapshotOptions{Metadata: map[string]*string{"!@#$%^&*()": to.Ptr("!@#$%^&*()")}}) + _require.Error(err) +} + +func (s *ShareRecordedTestsSuite) TestShareCreateSnapshotNegativeSnapshotOfSnapshot() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := 
testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + snapTime, err := time.Parse(time.UnixDate, "Fri Mar 31 20:00:00 GMT 2023") + _require.NoError(err) + + snapshotClient, err := shareClient.WithSnapshot(snapTime.UTC().String()) + _require.NoError(err) + + cResp, err := snapshotClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) //Note: this would not fail, snapshot would be ignored. + _require.NotNil(cResp) + _require.NotEmpty(*cResp.Snapshot) + + snapshotRecursiveClient, err := shareClient.WithSnapshot(*cResp.Snapshot) + _require.NoError(err) + _, err = snapshotRecursiveClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) //Note: this would not fail, snapshot would be ignored. +} + +func (s *ShareRecordedTestsSuite) TestShareDeleteSnapshotsInclude() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + + _, err = shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + Prefix: &shareName, + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(resp.Shares, 2) + } + + _, err = shareClient.Delete(context.Background(), &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + _require.NoError(err) + + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + Prefix: 
&shareName, + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + _require.Len(resp.Shares, 0) + } +} + +func (s *ShareRecordedTestsSuite) TestShareDeleteSnapshotsNoneWithSnapshots() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDefault, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + _, err = shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + + _, err = shareClient.Delete(context.Background(), nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.ShareHasSnapshots) +} + +func (s *ShareRecordedTestsSuite) TestShareRestoreSuccess() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountSoftDelete, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _, err = shareClient.Delete(context.Background(), nil) + _require.NoError(err) + + // wait for share deletion + time.Sleep(60 * time.Second) + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Deleted: true}, + Prefix: &shareName, + }) + + shareVersion := "" + shareCtr := 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + + for _, s := range resp.Shares { + if s.Deleted != nil && *s.Deleted { + shareVersion = *s.Version + } else { + shareCtr++ 
+ } + } + } + _require.NotEmpty(shareVersion) + _require.Equal(shareCtr, 0) + + rResp, err := shareClient.Restore(context.Background(), shareVersion, nil) + _require.NoError(err) + _require.NotNil(rResp.ETag) + _require.NotNil(rResp.RequestID) + _require.NotNil(rResp.Version) + + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Prefix: &shareName, + }) + + shareCtr = 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + shareCtr += len(resp.Shares) + } + _require.Equal(shareCtr, 1) +} + +func (s *ShareRecordedTestsSuite) TestShareRestoreFailures() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountSoftDelete, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer testcommon.DeleteShare(context.Background(), _require, shareClient) + + _, err = shareClient.Restore(context.Background(), "", nil) + _require.Error(err) + testcommon.ValidateFileErrorCode(_require, err, fileerror.MissingRequiredHeader) +} + +func (s *ShareRecordedTestsSuite) TestShareRestoreWithSnapshotsAgain() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountSoftDelete, nil) + _require.NoError(err) + + shareName := testcommon.GenerateShareName(testName) + shareClient := testcommon.CreateNewShare(context.Background(), _require, shareName, svcClient) + defer deleteShare(context.Background(), _require, shareClient, &share.DeleteOptions{DeleteSnapshots: to.Ptr(share.DeleteSnapshotsOptionTypeInclude)}) + + cResp, err := shareClient.CreateSnapshot(context.Background(), nil) + _require.NoError(err) + _require.NotNil(cResp.Snapshot) + + _, err = shareClient.Delete(context.Background(), &share.DeleteOptions{ + DeleteSnapshots: 
to.Ptr(share.DeleteSnapshotsOptionTypeInclude), + }) + _require.NoError(err) + + // wait for share deletion + time.Sleep(60 * time.Second) + + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Deleted: true}, + Prefix: &shareName, + }) + + shareVersion := "" + shareCtr := 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + + for _, s := range resp.Shares { + if s.Deleted != nil && *s.Deleted { + shareVersion = *s.Version + } else { + shareCtr++ + } + } + } + _require.NotEmpty(shareVersion) + _require.Equal(shareCtr, 0) + + rResp, err := shareClient.Restore(context.Background(), shareVersion, nil) + _require.NoError(err) + _require.NotNil(rResp.ETag) + _require.NotNil(rResp.RequestID) + _require.NotNil(rResp.Version) + + pager = svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Snapshots: true}, + Prefix: &shareName, + }) + + shareCtr = 0 + for pager.More() { + resp, err := pager.NextPage(context.Background()) + _require.NoError(err) + shareCtr += len(resp.Shares) + for _, s := range resp.Shares { + if s.Snapshot != nil { + _require.Equal(*s.Snapshot, *cResp.Snapshot) + } + } + } + _require.Equal(shareCtr, 2) // 1 share and 1 snapshot +} + +func (s *ShareRecordedTestsSuite) TestSASShareClientNoKey() { + _require := require.New(s.T()) + accountName, _ := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + + testName := s.T().Name() + shareName := testcommon.GenerateShareName(testName) + shareClient, err := share.NewClientWithNoCredential(fmt.Sprintf("https://%s.file.core.windows.net/%v", accountName, shareName), nil) + _require.NoError(err) + + permissions := sas.SharePermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + + _, err = shareClient.GetSASURL(permissions, expiry, nil) + 
_require.Equal(err, fileerror.MissingSharedKeyCredential) +} + +func (s *ShareRecordedTestsSuite) TestSASShareClientSignNegative() { + _require := require.New(s.T()) + accountName, accountKey := testcommon.GetGenericAccountInfo(testcommon.TestAccountDefault) + _require.Greater(len(accountName), 0) + _require.Greater(len(accountKey), 0) + + cred, err := share.NewSharedKeyCredential(accountName, accountKey) + _require.NoError(err) + + testName := s.T().Name() + shareName := testcommon.GenerateShareName(testName) + shareClient, err := share.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.file.core.windows.net/%v", accountName, shareName), cred, nil) + _require.NoError(err) + + permissions := sas.SharePermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Time{} + + // zero expiry time + _, err = shareClient.GetSASURL(permissions, expiry, &share.GetSASURLOptions{StartTime: to.Ptr(time.Now())}) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") + + // zero start and expiry time + _, err = shareClient.GetSASURL(permissions, expiry, &share.GetSASURLOptions{}) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") + + // empty permissions + _, err = shareClient.GetSASURL(sas.SharePermissions{}, expiry, nil) + _require.Equal(err.Error(), "service SAS is missing at least one of these: ExpiryTime or Permissions") +} diff --git a/sdk/storage/azfile/share/constants.go b/sdk/storage/azfile/share/constants.go new file mode 100644 index 000000000000..231ab9e27e09 --- /dev/null +++ b/sdk/storage/azfile/share/constants.go @@ -0,0 +1,50 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package share + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// AccessTier defines values for the access tier of the share. +type AccessTier = generated.ShareAccessTier + +const ( + AccessTierCool AccessTier = generated.ShareAccessTierCool + AccessTierHot AccessTier = generated.ShareAccessTierHot + AccessTierTransactionOptimized AccessTier = generated.ShareAccessTierTransactionOptimized +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return generated.PossibleShareAccessTierValues() +} + +// RootSquash defines values for the root squashing behavior on the share when NFS is enabled. If it's not specified, the default is NoRootSquash. +type RootSquash = generated.ShareRootSquash + +const ( + RootSquashNoRootSquash RootSquash = generated.ShareRootSquashNoRootSquash + RootSquashRootSquash RootSquash = generated.ShareRootSquashRootSquash + RootSquashAllSquash RootSquash = generated.ShareRootSquashAllSquash +) + +// PossibleRootSquashValues returns the possible values for the RootSquash const type. +func PossibleRootSquashValues() []RootSquash { + return generated.PossibleShareRootSquashValues() +} + +// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType +type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude + DeleteSnapshotsOptionTypeIncludeLeased DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeIncludeLeased +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. 
+func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return generated.PossibleDeleteSnapshotsOptionTypeValues() +} diff --git a/sdk/storage/azfile/share/examples_test.go b/sdk/storage/azfile/share/examples_test.go new file mode 100644 index 000000000000..bb4739e9b151 --- /dev/null +++ b/sdk/storage/azfile/share/examples_test.go @@ -0,0 +1,464 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package share_test + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share" + "log" + "os" + "time" +) + +func handleError(err error) { + if err != nil { + log.Fatal(err.Error()) + } +} + +func Example_share_Client_NewClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + fmt.Println(shareClient.URL()) +} + +func Example_share_Client_NewClientFromConnectionString() { + // Your connection string can be obtained from the Azure Portal. 
+ connectionString, ok := os.LookupEnv("AZURE_STORAGE_CONNECTION_STRING") + if !ok { + log.Fatal("the environment variable 'AZURE_STORAGE_CONNECTION_STRING' could not be found") + } + + shareName := "testshare" + shareClient, err := share.NewClientFromConnectionString(connectionString, shareName, nil) + handleError(err) + + fmt.Println(shareClient.URL()) +} + +func Example_share_Client_NewDirectoryClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + dirName := "testdirectory" + dirClient := shareClient.NewDirectoryClient(dirName) + + fmt.Println(dirClient.URL()) +} + +func Example_share_Client_NewRootDirectoryClient() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + dirClient := shareClient.NewRootDirectoryClient() + + fmt.Println(dirClient.URL()) +} + +func Example_share_Client_CreateSnapshot() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + 
panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + snapResp, err := shareClient.CreateSnapshot(context.TODO(), nil) + handleError(err) + shareSnapshot := *snapResp.Snapshot + + snapshotShareClient, err := shareClient.WithSnapshot(shareSnapshot) + handleError(err) + + fmt.Println(snapshotShareClient.URL()) + + _, err = snapshotShareClient.GetProperties(context.TODO(), nil) + handleError(err) +} + +func Example_share_Client_Create() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + _, err = shareClient.Create(context.TODO(), nil) + handleError(err) +} + +func Example_share_Client_Delete() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := 
service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + _, err = shareClient.Delete(context.TODO(), nil) + handleError(err) +} + +func Example_share_Client_Restore() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + serviceURL := fmt.Sprintf("https://%s.file.core.windows.net/", accountName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + svcClient, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, nil) + handleError(err) + + shareClient := svcClient.NewShareClient(shareName) + + // get share version for restore operation + pager := svcClient.NewListSharesPager(&service.ListSharesOptions{ + Include: service.ListSharesInclude{Deleted: true}, // Include deleted shares in the result + }) + + for pager.More() { + resp, err := pager.NextPage(context.Background()) + handleError(err) + for _, s := range resp.Shares { + if s.Deleted != nil && *s.Deleted { + _, err = shareClient.Restore(context.TODO(), *s.Version, nil) + handleError(err) + } + } + } +} + +func Example_share_Client_GetProperties() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, 
cred, nil) + handleError(err) + + _, err = shareClient.GetProperties(context.TODO(), nil) + handleError(err) +} + +func Example_share_Client_SetProperties() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + _, err = shareClient.SetProperties(context.TODO(), &share.SetPropertiesOptions{ + Quota: to.Ptr(int32(1000)), + AccessTier: to.Ptr(share.AccessTierHot), + }) + handleError(err) +} + +func Example_share_Client_AccessPolicy() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + permission := share.AccessPolicyPermission{Read: true, Write: true, Create: true, Delete: true, List: true}.String() + permissions := []*share.SignedIdentifier{ + { + ID: to.Ptr("1"), + AccessPolicy: &share.AccessPolicy{ + Start: to.Ptr(time.Now()), + Expiry: to.Ptr(time.Now().Add(time.Hour)), + Permission: &permission, + }, + }} + + _, err = shareClient.SetAccessPolicy(context.TODO(), &share.SetAccessPolicyOptions{ + ShareACL: 
permissions, + }) + handleError(err) + + resp, err := shareClient.GetAccessPolicy(context.TODO(), nil) + handleError(err) + + fmt.Println(*resp.SignedIdentifiers[0].AccessPolicy.Permission) +} + +func Example_share_Client_CreateGetPermission() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + testSDDL := `O:S-1-5-32-548G:S-1-5-21-397955417-626881126-188441444-512D:(A;;RPWPCCDCLCSWRCWDWOGA;;;S-1-0-0)` + createResp, err := shareClient.CreatePermission(context.TODO(), testSDDL, nil) + handleError(err) + + getResp, err := shareClient.GetPermission(context.TODO(), *createResp.FilePermissionKey, nil) + handleError(err) + fmt.Println(*getResp.Permission) +} + +func Example_share_Client_SetMetadata() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + md := map[string]*string{ + "Foo": to.Ptr("FooValuE"), + "Bar": to.Ptr("bArvaLue"), + } + _, err = shareClient.SetMetadata(context.TODO(), &share.SetMetadataOptions{ + 
Metadata: md, + }) + handleError(err) + + resp, err := shareClient.GetProperties(context.TODO(), nil) + handleError(err) + for k, v := range resp.Metadata { + fmt.Printf("%v : %v\n", k, *v) + } +} + +func Example_share_Client_GetStatistics() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + getStats, err := shareClient.GetStatistics(context.Background(), nil) + handleError(err) + fmt.Println(*getStats.ShareUsageBytes) +} + +func Example_share_Client_GetSASURL() { + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + shareName := "testshare" + shareURL := fmt.Sprintf("https://%s.file.core.windows.net/%s", accountName, shareName) + + cred, err := service.NewSharedKeyCredential(accountName, accountKey) + handleError(err) + + shareClient, err := share.NewClientWithSharedKeyCredential(shareURL, cred, nil) + handleError(err) + + permissions := sas.SharePermissions{ + Read: true, + Write: true, + Delete: true, + List: true, + Create: true, + } + expiry := time.Now().Add(time.Hour) + + shareSASURL, err := shareClient.GetSASURL(permissions, expiry, nil) + handleError(err) + + fmt.Println("SAS URL: ", shareSASURL) + + shareSASClient, err := share.NewClientWithNoCredential(shareSASURL, nil) + handleError(err) + + var dirs, files 
[]string + pager := shareSASClient.NewRootDirectoryClient().NewListFilesAndDirectoriesPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.Background()) + handleError(err) + + for _, d := range resp.Segment.Directories { + dirs = append(dirs, *d.Name) + } + for _, f := range resp.Segment.Files { + files = append(files, *f.Name) + } + } + + fmt.Println("Directories:") + for _, d := range dirs { + fmt.Println(d) + } + + fmt.Println("Files:") + for _, f := range files { + fmt.Println(f) + } +} diff --git a/sdk/storage/azfile/share/models.go b/sdk/storage/azfile/share/models.go new file mode 100644 index 000000000000..5b200ce9429b --- /dev/null +++ b/sdk/storage/azfile/share/models.go @@ -0,0 +1,312 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package share + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + "time" +) + +// SharedKeyCredential contains an account's name and its primary or secondary key. +type SharedKeyCredential = exported.SharedKeyCredential + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) { + return exported.NewSharedKeyCredential(accountName, accountKey) +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateOptions contains the optional parameters for the Client.Create method. +type CreateOptions struct { + // Specifies the access tier of the share. + AccessTier *AccessTier + // Protocols to enable on the share. 
+ EnabledProtocols *string + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // Specifies the maximum size of the share, in gigabytes. + Quota *int32 + // Root squash to set on the share. Only valid for NFS shares. + RootSquash *RootSquash +} + +func (o *CreateOptions) format() *generated.ShareClientCreateOptions { + if o == nil { + return nil + } + + return &generated.ShareClientCreateOptions{ + AccessTier: o.AccessTier, + EnabledProtocols: o.EnabledProtocols, + Metadata: o.Metadata, + Quota: o.Quota, + RootSquash: o.RootSquash, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DeleteOptions contains the optional parameters for the Client.Delete method. +type DeleteOptions struct { + // Specifies the option include to delete the base share and all of its snapshots. + DeleteSnapshots *DeleteSnapshotsOptionType + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *DeleteOptions) format() (*generated.ShareClientDeleteOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + Sharesnapshot: o.ShareSnapshot, + }, o.LeaseAccessConditions +} + +// LeaseAccessConditions contains optional parameters to access leased entity. +type LeaseAccessConditions = generated.LeaseAccessConditions + +// --------------------------------------------------------------------------------------------------------------------- + +// RestoreOptions contains the optional parameters for the Client.Restore method. 
+type RestoreOptions struct { + // placeholder for future options +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method. +type GetPropertiesOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the share snapshot to query. + ShareSnapshot *string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetPropertiesOptions) format() (*generated.ShareClientGetPropertiesOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientGetPropertiesOptions{ + Sharesnapshot: o.ShareSnapshot, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetPropertiesOptions contains the optional parameters for the Client.SetProperties method. +type SetPropertiesOptions struct { + // Specifies the access tier of the share. + AccessTier *AccessTier + // Specifies the maximum size of the share, in gigabytes. + Quota *int32 + // Root squash to set on the share. Only valid for NFS shares. + RootSquash *RootSquash + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetPropertiesOptions) format() (*generated.ShareClientSetPropertiesOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientSetPropertiesOptions{ + AccessTier: o.AccessTier, + Quota: o.Quota, + RootSquash: o.RootSquash, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method. +type CreateSnapshotOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string +} + +func (o *CreateSnapshotOptions) format() *generated.ShareClientCreateSnapshotOptions { + if o == nil { + return nil + } + + return &generated.ShareClientCreateSnapshotOptions{ + Metadata: o.Metadata, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method. +type GetAccessPolicyOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetAccessPolicyOptions) format() (*generated.ShareClientGetAccessPolicyOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// SignedIdentifier - Signed identifier. +type SignedIdentifier = generated.SignedIdentifier + +// AccessPolicy - An Access policy. +type AccessPolicy = generated.AccessPolicy + +// AccessPolicyPermission type simplifies creating the permissions string for a share's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's permission field. 
+type AccessPolicyPermission = exported.AccessPolicyPermission + +// --------------------------------------------------------------------------------------------------------------------- + +// SetAccessPolicyOptions contains the optional parameters for the Client.SetAccessPolicy method. +type SetAccessPolicyOptions struct { + // Specifies the ACL for the share. + ShareACL []*SignedIdentifier + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetAccessPolicyOptions) format() (*generated.ShareClientSetAccessPolicyOptions, []*SignedIdentifier, *LeaseAccessConditions, error) { + if o == nil { + return nil, nil, nil, nil + } + + if o.ShareACL != nil { + for _, si := range o.ShareACL { + err := formatTime(si) + if err != nil { + return nil, nil, nil, err + } + } + } + + return nil, o.ShareACL, o.LeaseAccessConditions, nil +} + +func formatTime(si *SignedIdentifier) error { + if si.AccessPolicy == nil { + return nil + } + + if si.AccessPolicy.Start != nil { + st, err := time.Parse(time.RFC3339, si.AccessPolicy.Start.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + si.AccessPolicy.Start = &st + } + if si.AccessPolicy.Expiry != nil { + et, err := time.Parse(time.RFC3339, si.AccessPolicy.Expiry.UTC().Format(time.RFC3339)) + if err != nil { + return err + } + si.AccessPolicy.Expiry = &et + } + + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// CreatePermissionOptions contains the optional parameters for the Client.CreatePermission method. 
+type CreatePermissionOptions struct { + // placeholder for future options +} + +func (o *CreatePermissionOptions) format(sharePermission string) (Permission, *generated.ShareClientCreatePermissionOptions) { + return Permission{ + Permission: &sharePermission, + }, nil +} + +// Permission - A permission (a security descriptor) at the share level. +type Permission = generated.SharePermission + +// --------------------------------------------------------------------------------------------------------------------- + +// GetPermissionOptions contains the optional parameters for the Client.GetPermission method. +type GetPermissionOptions struct { + // placeholder for future options +} + +func (o *GetPermissionOptions) format() *generated.ShareClientGetPermissionOptions { + return nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method. +type SetMetadataOptions struct { + // A name-value pair to associate with a file storage object. + Metadata map[string]*string + // LeaseAccessConditions contains optional parameters to access leased entity. + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *SetMetadataOptions) format() (*generated.ShareClientSetMetadataOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return &generated.ShareClientSetMetadataOptions{ + Metadata: o.Metadata, + }, o.LeaseAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetStatisticsOptions contains the optional parameters for the Client.GetStatistics method. +type GetStatisticsOptions struct { + // LeaseAccessConditions contains optional parameters to access leased entity. 
+ LeaseAccessConditions *LeaseAccessConditions +} + +func (o *GetStatisticsOptions) format() (*generated.ShareClientGetStatisticsOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// Stats - Stats for the share. +type Stats = generated.ShareStats + +// --------------------------------------------------------------------------------------------------------------------- + +// GetSASURLOptions contains the optional parameters for the Client.GetSASURL method. +type GetSASURLOptions struct { + StartTime *time.Time +} + +func (o *GetSASURLOptions) format() time.Time { + if o == nil { + return time.Time{} + } + + var st time.Time + if o.StartTime != nil { + st = o.StartTime.UTC() + } else { + st = time.Time{} + } + return st +} diff --git a/sdk/storage/azfile/share/responses.go b/sdk/storage/azfile/share/responses.go new file mode 100644 index 000000000000..2932e7ec93a9 --- /dev/null +++ b/sdk/storage/azfile/share/responses.go @@ -0,0 +1,45 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package share + +import "github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/internal/generated" + +// CreateResponse contains the response from method Client.Create. +type CreateResponse = generated.ShareClientCreateResponse + +// DeleteResponse contains the response from method Client.Delete. +type DeleteResponse = generated.ShareClientDeleteResponse + +// RestoreResponse contains the response from method Client.Restore. +type RestoreResponse = generated.ShareClientRestoreResponse + +// GetPropertiesResponse contains the response from method Client.GetProperties. +type GetPropertiesResponse = generated.ShareClientGetPropertiesResponse + +// SetPropertiesResponse contains the response from method Client.SetProperties. 
+type SetPropertiesResponse = generated.ShareClientSetPropertiesResponse + +// CreateSnapshotResponse contains the response from method Client.CreateSnapshot. +type CreateSnapshotResponse = generated.ShareClientCreateSnapshotResponse + +// GetAccessPolicyResponse contains the response from method Client.GetAccessPolicy. +type GetAccessPolicyResponse = generated.ShareClientGetAccessPolicyResponse + +// SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy. +type SetAccessPolicyResponse = generated.ShareClientSetAccessPolicyResponse + +// CreatePermissionResponse contains the response from method Client.CreatePermission. +type CreatePermissionResponse = generated.ShareClientCreatePermissionResponse + +// GetPermissionResponse contains the response from method Client.GetPermission. +type GetPermissionResponse = generated.ShareClientGetPermissionResponse + +// SetMetadataResponse contains the response from method Client.SetMetadata. +type SetMetadataResponse = generated.ShareClientSetMetadataResponse + +// GetStatisticsResponse contains the response from method Client.GetStatistics. +type GetStatisticsResponse = generated.ShareClientGetStatisticsResponse diff --git a/sdk/storage/azfile/test-resources.json b/sdk/storage/azfile/test-resources.json new file mode 100644 index 000000000000..c6259f7ab02f --- /dev/null +++ b/sdk/storage/azfile/test-resources.json @@ -0,0 +1,579 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "baseName": { + "type": "String" + }, + "tenantId": { + "type": "string", + "defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47", + "metadata": { + "description": "The tenant ID to which the application and resources belong." + } + }, + "testApplicationOid": { + "type": "string", + "metadata": { + "description": "The principal to assign the role to. This is application object id." 
+ } + } + }, + "variables": { + "mgmtApiVersion": "2022-09-01", + "authorizationApiVersion": "2018-09-01-preview", + "blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]", + "contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]", + "blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]", + "primaryAccountName": "[concat(parameters('baseName'), 'prim')]", + "immutableAccountName": "[concat(parameters('baseName'), 'imm')]", + "primaryEncryptionScopeName": "encryptionScope", + "primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]", + "secondaryAccountName": "[concat(parameters('baseName'), 'sec')]", + "premiumAccountName": "[concat(parameters('baseName'), 'prem')]", + "dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]", + "softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]", + "premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]", + "webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]", + "webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]", + "location": "[resourceGroup().location]", + "resourceGroupName": "[resourceGroup().name]", + "subscriptionId": "[subscription().subscriptionId]", + "encryption": { + "services": { + "file": { + "enabled": true + }, + "blob": { + "enabled": true + } + }, + "keySource": "Microsoft.Storage" + }, + "networkAcls": { + "bypass": "AzureServices", + "virtualNetworkRules": [], + "ipRules": [], + "defaultAction": "Allow" + } + }, + "resources": [ + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": 
"[variables('authorizationApiVersion')]", + "name": "[guid(concat('dataContributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataContributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('contributorRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('contributorRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Authorization/roleAssignments", + "apiVersion": "[variables('authorizationApiVersion')]", + "name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]", + "properties": { + "roleDefinitionId": "[variables('blobDataOwnerRoleId')]", + "principalId": "[parameters('testApplicationOid')]" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('primaryAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('immutableAccountName')]", + "location": 
"[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot", + "immutableStorageWithVersioning": { + "enabled": true + } + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('immutableAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "lastAccessTimeTrackingPolicy": { + "enable": true, + "name": "AccessTimeTracking", + "trackingGranularityInDays": 1, + "blobType": [ + "blockBlob" + ] + } + }, + "dependsOn": [ + "[variables('immutableAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/encryptionScopes", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('primaryEncryptionScope')]", + "properties": { + "source": "Microsoft.Storage", + "state": "Enabled" + }, + "dependsOn": [ + "[variables('primaryAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('secondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + 
"accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('dataLakeAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "isHnsEnabled": true, + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('dataLakeAccountName'), '/default')]", + "properties": { + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('dataLakeAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('softDeleteAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + "isVersioningEnabled": true, + "deleteRetentionPolicy": { + "allowPermanentDelete": true, + "enabled": true, + "days": 1 + }, + "containerDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/fileServices", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[concat(variables('softDeleteAccountName'), '/default')]", + "properties": { + 
"shareDeleteRetentionPolicy": { + "enabled": true, + "days": 1 + } + }, + "dependsOn": [ + "[variables('softDeleteAccountName')]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('premiumFileAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Premium_LRS", + "tier": "Premium" + }, + "kind": "FileStorage", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsPrimaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + }, + { + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "[variables('mgmtApiVersion')]", + "name": "[variables('webjobsSecondaryAccountName')]", + "location": "[variables('location')]", + "sku": { + "name": "Standard_RAGRS", + "tier": "Standard" + }, + "kind": "StorageV2", + "properties": { + "networkAcls": "[variables('networkAcls')]", + "supportsHttpsTrafficOnly": true, + "encryption": "[variables('encryption')]", + "accessTier": "Hot" + } + } + ], + "functions": [ + { + "namespace": "url", + "members": { + "serviceEndpointSuffix": { + "parameters": [ + { + "name": "endpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]" + } + } + } + }, + { + "namespace": "connectionString", + "members": { + "create": { + "parameters": [ + { + 
"name": "accountName", + "type": "string" + }, + { + "name": "accountKey", + "type": "string" + }, + { + "name": "blobEndpoint", + "type": "string" + }, + { + "name": "queueEndpoint", + "type": "string" + }, + { + "name": "fileEndpoint", + "type": "string" + }, + { + "name": "tableEndpoint", + "type": "string" + } + ], + "output": { + "type": "string", + "value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]" + } + } + } + } + ], + "outputs": { + "AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('primaryAccountName')]" + }, + "AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', 
variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "BLOB_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('secondaryAccountName')]" + }, + "BLOB_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumAccountName')]" + }, + "PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": 
"[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('dataLakeAccountName')]" + }, + "DATALAKE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('immutableAccountName')]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": 
"[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "IMMUTABLE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "IMMUTABLE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('immutableAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('softDeleteAccountName')]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": 
"[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]" + }, + "SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_NAME": { + "type": "string", + "value": "[variables('premiumFileAccountName')]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_KEY": { + "type": "string", + "value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]" + }, + "PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": { + "type": "string", + "value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]" + }, + "AZUREWEBJOBSSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', 
variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "AZUREWEBJOBSSECONDARYSTORAGE": { + "type": "string", + "value": "[connectionString.create(variables('webjobsSecondaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]" + }, + "RESOURCE_GROUP_NAME": { + "type": "string", + "value": "[variables('resourceGroupName')]" + }, + "SUBSCRIPTION_ID": { + "type": "string", + "value": "[variables('subscriptionId')]" + }, + "LOCATION": { + "type": "string", + "value": "[variables('location')]" + }, + "AZURE_STORAGE_ENCRYPTION_SCOPE": { + "type": "string", + "value": "[variables('primaryEncryptionScopeName')]" + } + } + } + \ No newline at end of file From 0cbfd88756304b154dc877a50f721b7ec454a0e8 Mon Sep 17 00:00:00 2001 From: Sourav Gupta <98318303+souravgupta-msft@users.noreply.github.com> Date: Mon, 8 May 2023 21:13:07 +0530 Subject: [PATCH 27/50] azfile: Fixing connection string parsing logic (#20798) * Fixing connection string parse logic * Update README --- sdk/storage/azfile/README.md | 6 +-- sdk/storage/azfile/file/client_test.go | 2 - 
sdk/storage/azfile/file/models.go | 2 +- sdk/storage/azfile/internal/shared/shared.go | 33 +++++++++++---- .../azfile/internal/shared/shared_test.go | 40 +++++++++++++++++++ 5 files changed, 69 insertions(+), 14 deletions(-) diff --git a/sdk/storage/azfile/README.md b/sdk/storage/azfile/README.md index 013c2d022248..2b6ba2b9a02e 100644 --- a/sdk/storage/azfile/README.md +++ b/sdk/storage/azfile/README.md @@ -245,7 +245,7 @@ or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments. -[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage +[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azfile [docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azfile [rest_docs]: https://docs.microsoft.com/rest/api/storageservices/file-service-rest-api [product_docs]: https://docs.microsoft.com/azure/storage/files/storage-files-introduction @@ -257,8 +257,8 @@ additional questions or comments. 
[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal [azure_sub]: https://azure.microsoft.com/free/ [azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError -[file_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage -[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage +[file_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azfile/fileerror/error_codes.go +[samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azfile/file/examples_test.go [storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md [cla]: https://cla.microsoft.com [coc]: https://opensource.microsoft.com/codeofconduct/ diff --git a/sdk/storage/azfile/file/client_test.go b/sdk/storage/azfile/file/client_test.go index f04191104e1a..094b660b9d7b 100644 --- a/sdk/storage/azfile/file/client_test.go +++ b/sdk/storage/azfile/file/client_test.go @@ -3117,5 +3117,3 @@ func (f *FileRecordedTestsSuite) TestFileForceCloseHandlesDefault() { } // TODO: Add tests for retry header options - -// TODO: fix links in README: source, file_error, samples diff --git a/sdk/storage/azfile/file/models.go b/sdk/storage/azfile/file/models.go index f27195800f02..d8792ba6f98b 100644 --- a/sdk/storage/azfile/file/models.go +++ b/sdk/storage/azfile/file/models.go @@ -732,7 +732,7 @@ func (u *UploadStreamOptions) getUploadRangeOptions() *UploadRangeOptions { } } -// URLParts object represents the components that make up an Azure Storage Container/Blob URL. +// URLParts object represents the components that make up an Azure Storage Share/Directory/File URL. // NOTE: Changing any SAS-related field requires computing a new SAS signature. 
type URLParts = sas.URLParts diff --git a/sdk/storage/azfile/internal/shared/shared.go b/sdk/storage/azfile/internal/shared/shared.go index 9ef2a3396816..0b819c28ea5a 100644 --- a/sdk/storage/azfile/internal/shared/shared.go +++ b/sdk/storage/azfile/internal/shared/shared.go @@ -100,20 +100,37 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err connStrMap[parts[0]] = parts[1] } - accountName, ok := connStrMap["AccountName"] - if !ok { - return ParsedConnectionString{}, errors.New("connection string missing AccountName") - } - + accountName := connStrMap["AccountName"] accountKey, ok := connStrMap["AccountKey"] if !ok { sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] if !ok { return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature") } - return ParsedConnectionString{ - ServiceURL: fmt.Sprintf("%v://%v.file.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), - }, nil + + fileEndpoint, ok := connStrMap["FileEndpoint"] + if !ok { + // We don't have a FileEndpoint, assume the default + if accountName != "" { + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v://%v.file.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), + }, nil + } else { + return ParsedConnectionString{}, errors.New("connection string missing AccountName") + } + } else { + if !strings.HasSuffix(fileEndpoint, "/") { + // add a trailing slash to be consistent with the portal + fileEndpoint += "/" + } + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v?%v", fileEndpoint, sharedAccessSignature), + }, nil + } + } else { + if accountName == "" { + return ParsedConnectionString{}, errors.New("connection string missing AccountName") + } } protocol, ok := connStrMap["DefaultEndpointsProtocol"] diff --git a/sdk/storage/azfile/internal/shared/shared_test.go b/sdk/storage/azfile/internal/shared/shared_test.go index 1cd5da99469d..8bb7f9a9e655 
100644 --- a/sdk/storage/azfile/internal/shared/shared_test.go +++ b/sdk/storage/azfile/internal/shared/shared_test.go @@ -7,6 +7,7 @@ package shared import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -93,3 +94,42 @@ func TestCParseConnectionStringAzurite(t *testing.T) { require.Equal(t, "dummyaccountname", parsed.AccountName) require.Equal(t, "secretkeykey", parsed.AccountKey) } + +func TestParseConnectionStringSASAndCustomDomain(t *testing.T) { + testData := []struct { + connectionStr string + parsedServiceURL string + parsedAccountName string + parsedAccountKey string + err error + }{ + { + connectionStr: "AccountName=dummyaccountname;SharedAccessSignature=fakesharedaccesssignature;FileEndpoint=http://127.0.0.1:10000/dummyaccountname;", + parsedServiceURL: "http://127.0.0.1:10000/dummyaccountname/?fakesharedaccesssignature", + }, + { + connectionStr: "BlobEndpoint=https://dummyaccountname.blob.core.windows.net/;FileEndpoint=https://dummyaccountname.file.core.windows.net/;SharedAccessSignature=fakesharedaccesssignature", + parsedServiceURL: "https://dummyaccountname.file.core.windows.net/?fakesharedaccesssignature", + }, + { + connectionStr: "BlobEndpoint=https://dummyaccountname.blob.core.windows.net;FileEndpoint=https://dummyaccountname.file.core.windows.net;SharedAccessSignature=fakesharedaccesssignature", + parsedServiceURL: "https://dummyaccountname.file.core.windows.net/?fakesharedaccesssignature", + }, + { + connectionStr: "SharedAccessSignature=fakesharedaccesssignature", + err: fmt.Errorf("connection string missing AccountName"), + }, + { + connectionStr: "DefaultEndpointsProtocol=http;AccountKey=secretkeykey;EndpointSuffix=core.windows.net", + err: fmt.Errorf("connection string missing AccountName"), + }, + } + + for _, td := range testData { + parsed, err := ParseConnectionString(td.connectionStr) + require.Equal(t, td.err, err) + require.Equal(t, td.parsedServiceURL, parsed.ServiceURL) + require.Equal(t, td.parsedAccountName, 
parsed.AccountName) + require.Equal(t, td.parsedAccountKey, parsed.AccountKey) + } +} From d54fb0885e7d9bf2d1b3fddcb02576510dce1b76 Mon Sep 17 00:00:00 2001 From: gracewilcox <43627800+gracewilcox@users.noreply.github.com> Date: Mon, 8 May 2023 09:45:32 -0700 Subject: [PATCH 28/50] [azadmin] fix flaky test (#20758) * fix flaky test * charles suggestion --- .../keyvault/azadmin/settings/client_test.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/sdk/security/keyvault/azadmin/settings/client_test.go b/sdk/security/keyvault/azadmin/settings/client_test.go index 932444a5595c..d638eb6dad5e 100644 --- a/sdk/security/keyvault/azadmin/settings/client_test.go +++ b/sdk/security/keyvault/azadmin/settings/client_test.go @@ -8,10 +8,13 @@ package settings_test import ( "context" + "errors" "testing" + "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azadmin/settings" "github.com/stretchr/testify/require" ) @@ -103,7 +106,16 @@ func TestUpdateSetting_InvalidSettingName(t *testing.T) { require.Nil(t, res.Type) require.Nil(t, res.Value) - res, err = client.UpdateSetting(context.Background(), "invalid name", settings.UpdateSettingRequest{Value: to.Ptr("true")}, nil) + for i := 0; i < 4; i++ { + res, err = client.UpdateSetting(context.Background(), "invalid name", settings.UpdateSettingRequest{Value: to.Ptr("true")}, nil) + var httpErr *azcore.ResponseError + // if correct error is returned, break from the loop and check for correctness + if errors.As(err, &httpErr) && httpErr.StatusCode == 400 { + break + } + // else sleep for 30 seconds and try again + recording.Sleep(30 * time.Second) + } require.Error(t, err) require.Nil(t, res.Name) require.Nil(t, res.Type) From ad8ebd9347c6316be3bf3f63d790046df6904301 Mon Sep 17 00:00:00 2001 From: 
Charles Lowell <10964656+chlowell@users.noreply.github.com> Date: Mon, 8 May 2023 10:44:05 -0700 Subject: [PATCH 29/50] Prepare azidentity v1.3.0 for release (#20756) --- sdk/azidentity/CHANGELOG.md | 6 +----- sdk/azidentity/version.go | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/sdk/azidentity/CHANGELOG.md b/sdk/azidentity/CHANGELOG.md index 85d1d4a7c200..cc8034cf7a8b 100644 --- a/sdk/azidentity/CHANGELOG.md +++ b/sdk/azidentity/CHANGELOG.md @@ -1,16 +1,12 @@ # Release History -## 1.3.0-beta.6 (Unreleased) - -### Features Added +## 1.3.0 (2023-05-09) ### Breaking Changes > These changes affect only code written against a beta version such as v1.3.0-beta.5 * Renamed `NewOnBehalfOfCredentialFromCertificate` to `NewOnBehalfOfCredentialWithCertificate` * Renamed `NewOnBehalfOfCredentialFromSecret` to `NewOnBehalfOfCredentialWithSecret` -### Bugs Fixed - ### Other Changes * Upgraded to MSAL v1.0.0 diff --git a/sdk/azidentity/version.go b/sdk/azidentity/version.go index 91398c112d8a..1a526b2e874d 100644 --- a/sdk/azidentity/version.go +++ b/sdk/azidentity/version.go @@ -11,5 +11,5 @@ const ( component = "azidentity" // Version is the semantic version (see http://semver.org) of this module. 
- version = "v1.3.0-beta.6" + version = "v1.3.0" ) From e2a6f7079d5b28ecae1f4744b3eda9e3d21236f5 Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Mon, 8 May 2023 14:56:02 -0700 Subject: [PATCH 30/50] Fix broken podman link (#20801) Co-authored-by: Wes Haggard --- eng/common/testproxy/transition-scripts/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eng/common/testproxy/transition-scripts/README.md b/eng/common/testproxy/transition-scripts/README.md index 06290bd0c784..09c2b364f0f3 100644 --- a/eng/common/testproxy/transition-scripts/README.md +++ b/eng/common/testproxy/transition-scripts/README.md @@ -46,7 +46,7 @@ To utilize this methodology, the user must set input argument `TestProxyExe` to Other requirements: -- [x] Install [docker](https://docs.docker.com/engine/install/) or [podman](https://podman.io/getting-started/installation.html) +- [x] Install [docker](https://docs.docker.com/engine/install/) or [podman](https://podman.io/) - [x] Set the environment variable `GIT_TOKEN` a valid token representing YOUR user ## Permissions From a59d912e20267f084140717fb80aed1c8c2afac3 Mon Sep 17 00:00:00 2001 From: gracewilcox <43627800+gracewilcox@users.noreply.github.com> Date: Mon, 8 May 2023 15:08:34 -0700 Subject: [PATCH 31/50] [azquery] update doc comments (#20755) * update doc comments * update statistics and visualization generation * prep-for-release --- sdk/monitor/azquery/CHANGELOG.md | 9 +--- sdk/monitor/azquery/autorest.md | 64 +---------------------------- sdk/monitor/azquery/build.go | 2 +- sdk/monitor/azquery/logs_client.go | 10 +++-- sdk/monitor/azquery/models.go | 8 ++-- sdk/monitor/azquery/models_serde.go | 8 ++-- sdk/monitor/azquery/version.go | 2 +- 7 files changed, 19 insertions(+), 84 deletions(-) diff --git a/sdk/monitor/azquery/CHANGELOG.md b/sdk/monitor/azquery/CHANGELOG.md index c0367796af1b..5f27242e7455 100644 --- a/sdk/monitor/azquery/CHANGELOG.md +++ 
b/sdk/monitor/azquery/CHANGELOG.md @@ -1,14 +1,9 @@ # Release History -## 1.1.0-beta.2 (Unreleased) - -### Features Added - -### Breaking Changes - -### Bugs Fixed +## 1.1.0 (2023-05-09) ### Other Changes +* Updated doc comments ## 1.1.0-beta.1 (2023-04-11) diff --git a/sdk/monitor/azquery/autorest.md b/sdk/monitor/azquery/autorest.md index 52b9cdea95f8..d2b07a06316f 100644 --- a/sdk/monitor/azquery/autorest.md +++ b/sdk/monitor/azquery/autorest.md @@ -9,7 +9,7 @@ clear-output-folder: false export-clients: true go: true input-file: - - https://github.com/Azure/azure-rest-api-specs/blob/605407bc0c1a133018285f550d01175469cb3c3a/specification/operationalinsights/data-plane/Microsoft.OperationalInsights/stable/2022-10-27/OperationalInsights.json + - https://github.com/Azure/azure-rest-api-specs/blob/72427ef3ff5875bd8409ef112ef5e6f3cf2b8795/specification/operationalinsights/data-plane/Microsoft.OperationalInsights/stable/2022-10-27/OperationalInsights.json - https://github.com/Azure/azure-rest-api-specs/blob/dba6ed1f03bda88ac6884c0a883246446cc72495/specification/monitor/resource-manager/Microsoft.Insights/stable/2018-01-01/metricDefinitions_API.json - https://github.com/Azure/azure-rest-api-specs/blob/dba6ed1f03bda88ac6884c0a883246446cc72495/specification/monitor/resource-manager/Microsoft.Insights/stable/2018-01-01/metrics_API.json - https://github.com/Azure/azure-rest-api-specs/blob/dba6ed1f03bda88ac6884c0a883246446cc72495/specification/monitor/resource-manager/Microsoft.Insights/preview/2017-12-01-preview/metricNamespaces_API.json @@ -152,48 +152,10 @@ directive: transform: $["name"] = "BatchQueryRequestMethod" # add descriptions for models and constants that don't have them - - from: swagger-document - where: $.definitions.batchQueryRequest.properties.path - transform: $["description"] = "The query path of a single request in a batch, defaults to /query" - - from: swagger-document - where: 
$.definitions.batchQueryRequest.properties.method - transform: $["description"] = "The method of a single request in a batch, defaults to POST" - - from: swagger-document - where: $.definitions.batchQueryResponse - transform: $["description"] = "Contains the batch query response and the headers, id, and status of the request" - from: constants.go where: $ transform: return $.replace(/type ResultType string/, "//ResultType - Reduces the set of data collected. The syntax allowed depends on the operation. See the operation's description for details.\ntype ResultType string"); - # update doc comments - - from: swagger-document - where: $.paths["/workspaces/{workspaceId}/query"].post - transform: $["description"] = "Executes an Analytics query for data." - - from: swagger-document - where: $.paths["/$batch"].post - transform: $["description"] = "Executes a batch of Analytics queries for data." - - from: swagger-document - where: $.definitions.queryResults.properties.tables - transform: $["description"] = "The results of the query in tabular format." - - from: swagger-document - where: $.definitions.batchQueryResults.properties.tables - transform: $["description"] = "The results of the query in tabular format." - - from: swagger-document - where: $.definitions.queryBody.properties.workspaces - transform: $["description"] = "A list of workspaces to query in addition to the primary workspace." - - from: swagger-document - where: $.definitions.batchQueryRequest.properties.headers - transform: $["description"] = "Optional. Headers of the request. Can use prefer header to set server timeout, query statistics and visualization information. 
For more information, see https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#readme-increase-wait-time-include-statistics-include-render-visualization" - - from: swagger-document - where: $.definitions.batchQueryRequest.properties.workspace - transform: $["description"] = "Primary Workspace ID of the query" - - from: swagger-document - where: $.definitions.batchQueryRequest.properties.id - transform: $["description"] = "Unique ID corresponding to each request in the batch" - - from: swagger-document - where: $.parameters.workspaceId - transform: $["description"] = "Primary Workspace ID of the query. This is Workspace ID from the Properties blade in the Azure portal" - # delete unused error models - from: models.go where: $ @@ -222,29 +184,7 @@ directive: # change Table.Rows from type [][]interface{} to type []Row - from: models.go where: $ - transform: return $.replace(/Rows \[\]\[\]any/, "Rows []Row"); - - # change render and statistics type to []byte - - from: models.go - where: $ - transform: return $.replace(/Statistics any/g, "Statistics []byte"); - - from: models.go - where: $ - transform: return $.replace(/Visualization any/g, "Visualization []byte"); - - from: models_serde.go - where: $ - transform: return - $.replace(/err(.*)r\.Statistics\)/, "r.Statistics = val") - - from: models_serde.go - where: $ - transform: return $.replace(/err(.*)r\.Visualization\)/, "r.Visualization = val"); - - from: models_serde.go - where: $ - transform: return - $.replace(/err(.*)b\.Statistics\)/, "b.Statistics = val") - - from: models_serde.go - where: $ - transform: return $.replace(/err(.*)b\.Visualization\)/, "b.Visualization = val"); + transform: return $.replace(/Rows \[\]\[\]byte/, "Rows []Row"); # change type of timespan from *string to *TimeInterval - from: models.go diff --git a/sdk/monitor/azquery/build.go b/sdk/monitor/azquery/build.go index d547e0d3ce3c..8634768d24b1 100644 --- a/sdk/monitor/azquery/build.go +++ 
b/sdk/monitor/azquery/build.go @@ -1,7 +1,7 @@ //go:build go1.18 // +build go1.18 -//go:generate autorest ./autorest.md +//go:generate autorest ./autorest.md --rawjson-as-bytes //go:generate gofmt -w . // Copyright (c) Microsoft Corporation. All rights reserved. diff --git a/sdk/monitor/azquery/logs_client.go b/sdk/monitor/azquery/logs_client.go index 9d22842a5111..d12a59eda261 100644 --- a/sdk/monitor/azquery/logs_client.go +++ b/sdk/monitor/azquery/logs_client.go @@ -27,7 +27,8 @@ type LogsClient struct { internal *azcore.Client } -// QueryBatch - Executes a batch of Analytics queries for data. +// QueryBatch - Executes a batch of Analytics queries for data. Here [https://learn.microsoft.com/azure/azure-monitor/logs/api/batch-queries] +// is an example for using POST with an Analytics query. // If the operation fails it returns an *azcore.ResponseError type. // // Generated from API version 2022-10-27 @@ -68,7 +69,7 @@ func (client *LogsClient) queryBatchHandleResponse(resp *http.Response) (LogsCli return result, nil } -// QueryResource - Executes an Analytics query for data in the context of a resource. Here [https://docs.microsoft.com/azure/azure-monitor/logs/api/azure-resource-queries] +// QueryResource - Executes an Analytics query for data in the context of a resource. Here [https://learn.microsoft.com/azure/azure-monitor/logs/api/azure-resource-queries] // is an example for using POST with an Analytics // query. // If the operation fails it returns an *azcore.ResponseError type. @@ -116,11 +117,12 @@ func (client *LogsClient) queryResourceHandleResponse(resp *http.Response) (Logs return result, nil } -// QueryWorkspace - Executes an Analytics query for data. +// QueryWorkspace - Executes an Analytics query for data. Here [https://learn.microsoft.com/azure/azure-monitor/logs/api/request-format] +// is an example for using POST with an Analytics query. // If the operation fails it returns an *azcore.ResponseError type. 
// // Generated from API version 2022-10-27 -// - workspaceID - Primary Workspace ID of the query. This is Workspace ID from the Properties blade in the Azure portal +// - workspaceID - Primary Workspace ID of the query. This is the Workspace ID from the Properties blade in the Azure portal. // - body - The Analytics query. Learn more about the Analytics query syntax [https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/] // - options - LogsClientQueryWorkspaceOptions contains the optional parameters for the LogsClient.QueryWorkspace method. func (client *LogsClient) QueryWorkspace(ctx context.Context, workspaceID string, body Body, options *LogsClientQueryWorkspaceOptions) (LogsClientQueryWorkspaceResponse, error) { diff --git a/sdk/monitor/azquery/models.go b/sdk/monitor/azquery/models.go index 6185b68ca31c..1fc49eeeb8f9 100644 --- a/sdk/monitor/azquery/models.go +++ b/sdk/monitor/azquery/models.go @@ -16,15 +16,13 @@ type BatchQueryRequest struct { // REQUIRED; The Analytics query. Learn more about the Analytics query syntax [https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/] Body *Body `json:"body,omitempty"` - // REQUIRED; Unique ID corresponding to each request in the batch + // REQUIRED; Unique ID corresponding to each request in the batch. CorrelationID *string `json:"id,omitempty"` - // REQUIRED; Primary Workspace ID of the query + // REQUIRED; Primary Workspace ID of the query. This is the Workspace ID from the Properties blade in the Azure portal. WorkspaceID *string `json:"workspace,omitempty"` - // Optional. Headers of the request. Can use prefer header to set server timeout, query statistics and visualization information. - // For more information, see - // https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#readme-increase-wait-time-include-statistics-include-render-visualization + // Headers of the request. 
Can use prefer header to set server timeout and to query statistics and visualization information. Headers map[string]*string `json:"headers,omitempty"` // The method of a single request in a batch, defaults to POST diff --git a/sdk/monitor/azquery/models_serde.go b/sdk/monitor/azquery/models_serde.go index 9667f3ecec6f..26af83bea4e6 100644 --- a/sdk/monitor/azquery/models_serde.go +++ b/sdk/monitor/azquery/models_serde.go @@ -113,9 +113,9 @@ func (b *BatchQueryResponse) UnmarshalJSON(data []byte) error { func (b BatchQueryResults) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "error", b.Error) - populate(objectMap, "statistics", &b.Statistics) + populate(objectMap, "statistics", json.RawMessage(b.Statistics)) populate(objectMap, "tables", b.Tables) - populate(objectMap, "render", &b.Visualization) + populate(objectMap, "render", json.RawMessage(b.Visualization)) return json.Marshal(objectMap) } @@ -713,9 +713,9 @@ func (r *Response) UnmarshalJSON(data []byte) error { func (r Results) MarshalJSON() ([]byte, error) { objectMap := make(map[string]any) populate(objectMap, "error", r.Error) - populate(objectMap, "statistics", &r.Statistics) + populate(objectMap, "statistics", json.RawMessage(r.Statistics)) populate(objectMap, "tables", r.Tables) - populate(objectMap, "render", &r.Visualization) + populate(objectMap, "render", json.RawMessage(r.Visualization)) return json.Marshal(objectMap) } diff --git a/sdk/monitor/azquery/version.go b/sdk/monitor/azquery/version.go index 58d40bd4c5d0..59ca11d98ff1 100644 --- a/sdk/monitor/azquery/version.go +++ b/sdk/monitor/azquery/version.go @@ -8,5 +8,5 @@ package azquery const ( moduleName = "azquery" - version = "v1.1.0-beta.2" + version = "v1.1.0" ) From bd3b4670e847f994d61c828b272f6f9294848574 Mon Sep 17 00:00:00 2001 From: Bob Tabor Date: Mon, 8 May 2023 18:40:09 -0500 Subject: [PATCH 32/50] Fixed contribution section (#20752) Co-authored-by: Bob Tabor --- sdk/storage/azqueue/README.md 
| 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/sdk/storage/azqueue/README.md b/sdk/storage/azqueue/README.md index 57b55f321701..0877f9ae7637 100644 --- a/sdk/storage/azqueue/README.md +++ b/sdk/storage/azqueue/README.md @@ -204,15 +204,14 @@ Get started with our [Queue samples][samples]. They contain complete examples o See the [Storage CONTRIBUTING.md][storage_contrib] for details on building, testing, and contributing to this library. -This project welcomes contributions and suggestions. Most contributions require -you to agree to a Contributor License Agreement (CLA) declaring that you have -the right to, and actually do, grant us the rights to use your contribution. For -details, visit [cla.microsoft.com][cla]. +This project welcomes contributions and suggestions. Most contributions require you to agree to a [Contributor License Agreement (CLA)][cla] declaring that you have the right to, and actually do, grant us the rights to use your contribution. + +If you'd like to contribute to this library, please read the [contributing guide] [contributing_guide] to learn more about how to build and test the code. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +This project has adopted the [Microsoft Open Source Code of Conduct][coc]. For more information, see the [Code of Conduct FAQ][coc_faq] or contact [opencode@microsoft.com][coc_contact] with any additional questions or comments. -This project has adopted the [Microsoft Open Source Code of Conduct][coc]. -For more information see the [Code of Conduct FAQ][coc_faq] -or contact [opencode@microsoft.com][coc_contact] with any -additional questions or comments. 
[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azqueue @@ -235,7 +234,8 @@ additional questions or comments. [sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azqueue/sas [service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azqueue/service_client.go [storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md +[contributing_guide]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md [cla]: https://cla.microsoft.com [coc]: https://opensource.microsoft.com/codeofconduct/ [coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ -[coc_contact]: mailto:opencode@microsoft.com +[coc_contact]: mailto:opencode@microsoft.com \ No newline at end of file From 132a01a254d0c6c985c6116a9ead29b802d10dd4 Mon Sep 17 00:00:00 2001 From: Richard Park <51494936+richardpark-msft@users.noreply.github.com> Date: Mon, 8 May 2023 16:58:14 -0700 Subject: [PATCH 33/50] [azeventhubs,azservicebus] Some API cleanup, renames (#20754) * Adding options to UpdateCheckpoint(), just for future potential expansion * Make Offset an int64, not a *int64 (it's not optional, it'll always come back with ReceivedEvents) * Adding more logging into the checkpoint store. 
* Point all imports at the production go-amqp --- sdk/messaging/azeventhubs/amqp_message.go | 2 +- sdk/messaging/azeventhubs/checkpoint_store.go | 8 +- .../azeventhubs/checkpoints/blob_store.go | 9 +- .../checkpoints/blob_store_test.go | 4 +- .../consumer_client_internal_test.go | 2 +- sdk/messaging/azeventhubs/event_data.go | 6 +- sdk/messaging/azeventhubs/event_data_batch.go | 2 +- .../azeventhubs/event_data_batch_unit_test.go | 2 +- sdk/messaging/azeventhubs/event_data_test.go | 6 +- .../example_checkpoint_migration_test.go | 2 +- ...example_consuming_with_checkpoints_test.go | 4 +- sdk/messaging/azeventhubs/go.mod | 3 +- sdk/messaging/azeventhubs/go.sum | 2 + .../inmemory_checkpoint_store_test.go | 4 +- .../azeventhubs/internal/amqp_fakes.go | 2 +- .../azeventhubs/internal/amqpwrap/amqpwrap.go | 2 +- .../internal/amqpwrap/mock_amqp_test.go | 2 +- .../azeventhubs/internal/amqpwrap/rpc.go | 2 +- sdk/messaging/azeventhubs/internal/cbs.go | 2 +- .../azeventhubs/internal/cbs_test.go | 2 +- .../stress/tests/processor_stress_tester.go | 2 +- .../internal/eh/stress/tests/shared.go | 2 +- sdk/messaging/azeventhubs/internal/errors.go | 2 +- .../azeventhubs/internal/errors_test.go | 2 +- .../azeventhubs/internal/go-amqp/LICENSE | 22 - .../azeventhubs/internal/go-amqp/conn.go | 1135 --------- .../azeventhubs/internal/go-amqp/const.go | 96 - .../azeventhubs/internal/go-amqp/creditor.go | 119 - .../azeventhubs/internal/go-amqp/errors.go | 107 - .../go-amqp/internal/bitmap/bitmap.go | 99 - .../go-amqp/internal/buffer/buffer.go | 180 -- .../internal/go-amqp/internal/debug/debug.go | 20 - .../go-amqp/internal/debug/debug_debug.go | 51 - .../go-amqp/internal/encoding/decode.go | 1150 --------- .../go-amqp/internal/encoding/encode.go | 573 ----- .../go-amqp/internal/encoding/types.go | 2155 ----------------- .../go-amqp/internal/frames/frames.go | 1543 ------------ .../go-amqp/internal/frames/parsing.go | 162 -- .../internal/go-amqp/internal/queue/queue.go | 164 -- 
.../go-amqp/internal/shared/shared.go | 36 - .../azeventhubs/internal/go-amqp/link.go | 390 --- .../internal/go-amqp/link_options.go | 241 -- .../azeventhubs/internal/go-amqp/message.go | 492 ---- .../azeventhubs/internal/go-amqp/receiver.go | 897 ------- .../azeventhubs/internal/go-amqp/sasl.go | 262 -- .../azeventhubs/internal/go-amqp/sender.go | 476 ---- .../azeventhubs/internal/go-amqp/session.go | 792 ------ .../azeventhubs/internal/links_test.go | 2 +- .../azeventhubs/internal/links_unit_test.go | 2 +- .../azeventhubs/internal/mock/mock_amqp.go | 2 +- .../azeventhubs/internal/mock/mock_helpers.go | 2 +- .../azeventhubs/internal/namespace.go | 2 +- .../azeventhubs/internal/namespace_test.go | 2 +- sdk/messaging/azeventhubs/internal/rpc.go | 2 +- .../azeventhubs/internal/rpc_test.go | 2 +- .../internal/utils/retrier_test.go | 2 +- sdk/messaging/azeventhubs/mgmt.go | 2 +- sdk/messaging/azeventhubs/partition_client.go | 2 +- .../azeventhubs/partition_client_unit_test.go | 2 +- .../azeventhubs/processor_load_balancer.go | 23 +- .../azeventhubs/processor_partition_client.go | 20 +- sdk/messaging/azeventhubs/processor_test.go | 12 +- .../azeventhubs/processor_unit_test.go | 4 +- sdk/messaging/azeventhubs/producer_client.go | 2 +- sdk/messaging/azservicebus/amqp_message.go | 2 +- sdk/messaging/azservicebus/go.mod | 4 +- sdk/messaging/azservicebus/go.sum | 11 +- .../azservicebus/internal/amqpLinks_test.go | 2 +- .../azservicebus/internal/amqp_test_utils.go | 2 +- .../internal/amqplinks_unit_test.go | 2 +- .../internal/amqpwrap/amqpwrap.go | 2 +- .../internal/amqpwrap/mock_amqp_test.go | 2 +- .../azservicebus/internal/amqpwrap/rpc.go | 2 +- sdk/messaging/azservicebus/internal/cbs.go | 2 +- sdk/messaging/azservicebus/internal/errors.go | 2 +- .../azservicebus/internal/errors_test.go | 2 +- .../internal/exported/error_test.go | 2 +- .../azservicebus/internal/go-amqp/LICENSE | 22 - .../azservicebus/internal/go-amqp/conn.go | 1135 --------- 
.../azservicebus/internal/go-amqp/const.go | 96 - .../azservicebus/internal/go-amqp/creditor.go | 119 - .../azservicebus/internal/go-amqp/errors.go | 107 - .../go-amqp/internal/bitmap/bitmap.go | 99 - .../go-amqp/internal/buffer/buffer.go | 180 -- .../internal/go-amqp/internal/debug/debug.go | 20 - .../go-amqp/internal/debug/debug_debug.go | 51 - .../go-amqp/internal/encoding/decode.go | 1150 --------- .../go-amqp/internal/encoding/encode.go | 573 ----- .../go-amqp/internal/encoding/types.go | 2155 ----------------- .../go-amqp/internal/frames/frames.go | 1543 ------------ .../go-amqp/internal/frames/parsing.go | 162 -- .../internal/go-amqp/internal/queue/queue.go | 164 -- .../go-amqp/internal/shared/shared.go | 36 - .../azservicebus/internal/go-amqp/link.go | 390 --- .../internal/go-amqp/link_options.go | 241 -- .../azservicebus/internal/go-amqp/message.go | 492 ---- .../azservicebus/internal/go-amqp/receiver.go | 897 ------- .../azservicebus/internal/go-amqp/sasl.go | 262 -- .../azservicebus/internal/go-amqp/sender.go | 482 ---- .../azservicebus/internal/go-amqp/session.go | 792 ------ sdk/messaging/azservicebus/internal/mgmt.go | 2 +- .../internal/mock/emulation/events.go | 2 +- .../internal/mock/emulation/mock_data.go | 2 +- .../mock/emulation/mock_data_receiver.go | 2 +- .../mock/emulation/mock_data_sender.go | 2 +- .../mock/emulation/mock_data_session.go | 2 +- .../internal/mock/emulation/mock_data_test.go | 2 +- .../internal/mock/emulation/queue.go | 2 +- .../internal/mock/emulation/queue_test.go | 2 +- .../azservicebus/internal/mock/mock_amqp.go | 2 +- .../internal/mock/mock_helpers.go | 2 +- .../azservicebus/internal/mock/mock_rpc.go | 2 +- .../azservicebus/internal/namespace.go | 2 +- .../azservicebus/internal/namespace_test.go | 2 +- sdk/messaging/azservicebus/internal/rpc.go | 2 +- .../azservicebus/internal/rpc_test.go | 2 +- .../internal/test/test_helpers.go | 2 +- .../internal/utils/retrier_test.go | 2 +- sdk/messaging/azservicebus/message.go | 2 +- 
sdk/messaging/azservicebus/messageSettler.go | 2 +- sdk/messaging/azservicebus/message_batch.go | 2 +- .../azservicebus/message_batch_test.go | 2 +- sdk/messaging/azservicebus/message_test.go | 2 +- sdk/messaging/azservicebus/receiver.go | 2 +- .../azservicebus/receiver_helpers_test.go | 2 +- .../azservicebus/receiver_simulated_test.go | 2 +- sdk/messaging/azservicebus/receiver_test.go | 4 +- .../azservicebus/receiver_unit_test.go | 2 +- sdk/messaging/azservicebus/sender.go | 2 +- .../azservicebus/sender_unit_test.go | 2 +- .../azservicebus/session_receiver.go | 2 +- .../azservicebus/session_receiver_test.go | 2 +- 132 files changed, 150 insertions(+), 22444 deletions(-) delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/LICENSE delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/conn.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/const.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/creditor.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/errors.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/bitmap/bitmap.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer/buffer.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/debug/debug.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/debug/debug_debug.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/decode.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/encode.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/types.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/frames/frames.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/frames/parsing.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/internal/queue/queue.go delete mode 100644 
sdk/messaging/azeventhubs/internal/go-amqp/internal/shared/shared.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/link.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/link_options.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/message.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/receiver.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/sasl.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/sender.go delete mode 100644 sdk/messaging/azeventhubs/internal/go-amqp/session.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/LICENSE delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/conn.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/const.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/creditor.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/errors.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/bitmap/bitmap.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/buffer/buffer.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/debug/debug.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/debug/debug_debug.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/decode.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/encode.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/types.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/frames/frames.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/frames/parsing.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/queue/queue.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/internal/shared/shared.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/link.go 
delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/link_options.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/message.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/receiver.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/sasl.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/sender.go delete mode 100644 sdk/messaging/azservicebus/internal/go-amqp/session.go diff --git a/sdk/messaging/azeventhubs/amqp_message.go b/sdk/messaging/azeventhubs/amqp_message.go index 2b6ca2db8915..2e0bc54045f5 100644 --- a/sdk/messaging/azeventhubs/amqp_message.go +++ b/sdk/messaging/azeventhubs/amqp_message.go @@ -6,7 +6,7 @@ package azeventhubs import ( "time" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) // AMQPAnnotatedMessage represents the AMQP message, as received from Event Hubs. diff --git a/sdk/messaging/azeventhubs/checkpoint_store.go b/sdk/messaging/azeventhubs/checkpoint_store.go index 3d321839251b..83c1c3e54fa7 100644 --- a/sdk/messaging/azeventhubs/checkpoint_store.go +++ b/sdk/messaging/azeventhubs/checkpoint_store.go @@ -22,8 +22,8 @@ type CheckpointStore interface { // ListOwnership lists all ownerships. ListOwnership(ctx context.Context, fullyQualifiedNamespace string, eventHubName string, consumerGroup string, options *ListOwnershipOptions) ([]Ownership, error) - // UpdateCheckpoint updates a specific checkpoint with a sequence and offset. - UpdateCheckpoint(ctx context.Context, checkpoint Checkpoint, options *UpdateCheckpointOptions) error + // SetCheckpoint updates a specific checkpoint with a sequence and offset. + SetCheckpoint(ctx context.Context, checkpoint Checkpoint, options *SetCheckpointOptions) error } // Ownership tracks which consumer owns a particular partition. 
@@ -59,8 +59,8 @@ type ListOwnershipOptions struct { // For future expansion } -// UpdateCheckpointOptions contains optional parameters for the UpdateCheckpoint function -type UpdateCheckpointOptions struct { +// SetCheckpointOptions contains optional parameters for the SetCheckpoint function +type SetCheckpointOptions struct { // For future expansion } diff --git a/sdk/messaging/azeventhubs/checkpoints/blob_store.go b/sdk/messaging/azeventhubs/checkpoints/blob_store.go index e8a134643603..5bb0fe17068b 100644 --- a/sdk/messaging/azeventhubs/checkpoints/blob_store.go +++ b/sdk/messaging/azeventhubs/checkpoints/blob_store.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" @@ -63,6 +64,8 @@ func (b *BlobStore) ClaimOwnership(ctx context.Context, partitionOwnership []aze if bloberror.HasCode(err, bloberror.ConditionNotMet, // updated before we could update it bloberror.BlobAlreadyExists) { // created before we could create it + + log.Writef(azeventhubs.EventConsumer, "[%s] skipping %s because: %s", po.OwnerID, po.PartitionID, err) continue } @@ -180,10 +183,10 @@ func (b *BlobStore) ListOwnership(ctx context.Context, fullyQualifiedNamespace s return ownerships, nil } -// UpdateCheckpoint updates a specific checkpoint with a sequence and offset. +// SetCheckpoint updates a specific checkpoint with a sequence and offset. // // NOTE: This function doesn't attempt to prevent simultaneous checkpoint updates - ownership is assumed. 
-func (b *BlobStore) UpdateCheckpoint(ctx context.Context, checkpoint azeventhubs.Checkpoint, options *azeventhubs.UpdateCheckpointOptions) error { +func (b *BlobStore) SetCheckpoint(ctx context.Context, checkpoint azeventhubs.Checkpoint, options *azeventhubs.SetCheckpointOptions) error { blobName, err := nameForCheckpointBlob(checkpoint) if err != nil { @@ -199,6 +202,7 @@ func (b *BlobStore) setOwnershipMetadata(ctx context.Context, blobName string, o blobClient := b.cc.NewBlockBlobClient(blobName) if ownership.ETag != nil { + log.Writef(azeventhubs.EventConsumer, "[%s] claiming ownership for %s with etag %s", ownership.OwnerID, ownership.PartitionID, string(*ownership.ETag)) setMetadataResp, err := blobClient.SetMetadata(ctx, blobMetadata, &blob.SetMetadataOptions{ AccessConditions: &blob.AccessConditions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ @@ -214,6 +218,7 @@ func (b *BlobStore) setOwnershipMetadata(ctx context.Context, blobName string, o return setMetadataResp.LastModified, *setMetadataResp.ETag, nil } + log.Writef(azeventhubs.EventConsumer, "[%s] claiming ownership for %s with NO etags", ownership.PartitionID, ownership.OwnerID) uploadResp, err := blobClient.Upload(ctx, streaming.NopCloser(bytes.NewReader([]byte{})), &blockblob.UploadOptions{ Metadata: blobMetadata, AccessConditions: &blob.AccessConditions{ diff --git a/sdk/messaging/azeventhubs/checkpoints/blob_store_test.go b/sdk/messaging/azeventhubs/checkpoints/blob_store_test.go index 4605fdcb33ba..49923e1f46a5 100644 --- a/sdk/messaging/azeventhubs/checkpoints/blob_store_test.go +++ b/sdk/messaging/azeventhubs/checkpoints/blob_store_test.go @@ -33,7 +33,7 @@ func TestBlobStore_Checkpoints(t *testing.T) { require.NoError(t, err) require.Empty(t, checkpoints) - err = store.UpdateCheckpoint(context.Background(), azeventhubs.Checkpoint{ + err = store.SetCheckpoint(context.Background(), azeventhubs.Checkpoint{ ConsumerGroup: "$Default", EventHubName: "event-hub-name", 
FullyQualifiedNamespace: "ns.servicebus.windows.net", @@ -57,7 +57,7 @@ func TestBlobStore_Checkpoints(t *testing.T) { // There's a code path to allow updating the blob after it's been created but without an etag // in which case it just updates it. - err = store.UpdateCheckpoint(context.Background(), azeventhubs.Checkpoint{ + err = store.SetCheckpoint(context.Background(), azeventhubs.Checkpoint{ ConsumerGroup: "$Default", EventHubName: "event-hub-name", FullyQualifiedNamespace: "ns.servicebus.windows.net", diff --git a/sdk/messaging/azeventhubs/consumer_client_internal_test.go b/sdk/messaging/azeventhubs/consumer_client_internal_test.go index dedd628be219..50a2d06ae62b 100644 --- a/sdk/messaging/azeventhubs/consumer_client_internal_test.go +++ b/sdk/messaging/azeventhubs/consumer_client_internal_test.go @@ -126,7 +126,7 @@ func TestConsumerClient_Recovery(t *testing.T) { require.NoError(t, err) require.EqualValues(t, 1, len(events)) - t.Logf("[%s] Received seq:%d, offset:%d", sr.PartitionID, events[0].SequenceNumber, *events[0].Offset) + t.Logf("[%s] Received seq:%d, offset:%d", sr.PartitionID, events[0].SequenceNumber, events[0].Offset) require.Equal(t, fmt.Sprintf("event 1 for partition %s", sr.PartitionID), string(events[0].Body)) }(i, sr) diff --git a/sdk/messaging/azeventhubs/event_data.go b/sdk/messaging/azeventhubs/event_data.go index 9300aa51f7c6..00b89a3ca0e1 100644 --- a/sdk/messaging/azeventhubs/event_data.go +++ b/sdk/messaging/azeventhubs/event_data.go @@ -10,7 +10,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/eh" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) // EventData is an event that can be sent, using the ProducerClient, to an Event Hub. @@ -52,7 +52,7 @@ type ReceivedEventData struct { PartitionKey *string // Offset is the offset of the event. 
- Offset *int64 + Offset int64 // RawAMQPMessage is the AMQP message, as received by the client. This can be useful to get access // to properties that are not exposed by ReceivedEventData such as payloads encoded into the @@ -177,7 +177,7 @@ func updateFromAMQPAnnotations(src *amqp.Message, dest *ReceivedEventData) error case offsetNumberAnnotation: if offsetStr, ok := v.(string); ok { if offset, err := strconv.ParseInt(offsetStr, 10, 64); err == nil { - dest.Offset = &offset + dest.Offset = offset continue } } diff --git a/sdk/messaging/azeventhubs/event_data_batch.go b/sdk/messaging/azeventhubs/event_data_batch.go index a73e7145df92..edc6517b90b5 100644 --- a/sdk/messaging/azeventhubs/event_data_batch.go +++ b/sdk/messaging/azeventhubs/event_data_batch.go @@ -11,7 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) // ErrEventDataTooLarge is returned when a message cannot fit into a batch when using the [azeventhubs.EventDataBatch.AddEventData] function. 
diff --git a/sdk/messaging/azeventhubs/event_data_batch_unit_test.go b/sdk/messaging/azeventhubs/event_data_batch_unit_test.go index 23224e5e8104..4144f09a3635 100644 --- a/sdk/messaging/azeventhubs/event_data_batch_unit_test.go +++ b/sdk/messaging/azeventhubs/event_data_batch_unit_test.go @@ -10,8 +10,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/mock" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azeventhubs/event_data_test.go b/sdk/messaging/azeventhubs/event_data_test.go index bbc25e4cdcdd..0c58c9bde4ea 100644 --- a/sdk/messaging/azeventhubs/event_data_test.go +++ b/sdk/messaging/azeventhubs/event_data_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) @@ -20,7 +20,7 @@ func TestEventData_Annotations(t *testing.T) { require.Empty(t, re.Body) require.Nil(t, re.EnqueuedTime) require.Equal(t, int64(0), re.SequenceNumber) - require.Nil(t, re.Offset) + require.Zero(t, re.Offset) require.Nil(t, re.PartitionKey) }) @@ -99,7 +99,7 @@ func TestEventData_newReceivedEventData(t *testing.T) { SystemProperties: map[string]any{ "hello": "world", }, - Offset: to.Ptr[int64](102), + Offset: int64(102), PartitionKey: to.Ptr("partition key"), RawAMQPMessage: &AMQPAnnotatedMessage{ Properties: &AMQPAnnotatedMessageProperties{ diff --git a/sdk/messaging/azeventhubs/example_checkpoint_migration_test.go b/sdk/messaging/azeventhubs/example_checkpoint_migration_test.go index 
2692d47c1fe4..937c364a8ae6 100644 --- a/sdk/messaging/azeventhubs/example_checkpoint_migration_test.go +++ b/sdk/messaging/azeventhubs/example_checkpoint_migration_test.go @@ -102,7 +102,7 @@ func Example_migrateCheckpoints() { newCheckpoint.Offset = &offset newCheckpoint.SequenceNumber = &oldCheckpoint.Checkpoint.SequenceNumber - if err := newCheckpointStore.UpdateCheckpoint(context.Background(), newCheckpoint, nil); err != nil { + if err := newCheckpointStore.SetCheckpoint(context.Background(), newCheckpoint, nil); err != nil { panic(err) } } diff --git a/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go b/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go index 1e4243cbafc1..c02ad5804ddd 100644 --- a/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go +++ b/sdk/messaging/azeventhubs/example_consuming_with_checkpoints_test.go @@ -136,7 +136,7 @@ func processEventsForPartition(partitionClient *azeventhubs.ProcessorPartitionCl // Updates the checkpoint with the latest event received. If processing needs to restart // it will restart from this point, automatically. - if err := partitionClient.UpdateCheckpoint(context.TODO(), events[len(events)-1]); err != nil { + if err := partitionClient.UpdateCheckpoint(context.TODO(), events[len(events)-1], nil); err != nil { return err } } @@ -154,7 +154,7 @@ func shutdownPartitionResources(partitionClient *azeventhubs.ProcessorPartitionC defer partitionClient.Close(context.TODO()) } -func createClientsForExample(eventHubConnectionString, eventHubName, storageConnectionString, storageContainerName string) (*azeventhubs.ConsumerClient, *checkpoints.BlobStore, error) { +func createClientsForExample(eventHubConnectionString, eventHubName, storageConnectionString, storageContainerName string) (*azeventhubs.ConsumerClient, azeventhubs.CheckpointStore, error) { // NOTE: the storageContainerName must exist before the checkpoint store can be used. 
azBlobContainerClient, err := container.NewClientFromConnectionString(storageConnectionString, storageContainerName, nil) diff --git a/sdk/messaging/azeventhubs/go.mod b/sdk/messaging/azeventhubs/go.mod index e158f2a2713d..db6ebc2cd1d6 100644 --- a/sdk/messaging/azeventhubs/go.mod +++ b/sdk/messaging/azeventhubs/go.mod @@ -8,10 +8,11 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.0.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 + github.com/Azure/go-amqp v1.0.0 github.com/golang/mock v1.6.0 github.com/joho/godotenv v1.4.0 github.com/stretchr/testify v1.7.1 - nhooyr.io/websocket v1.8.7 + nhooyr.io/websocket v1.8.7 ) require ( diff --git a/sdk/messaging/azeventhubs/go.sum b/sdk/messaging/azeventhubs/go.sum index 20d483a3af2e..a09abd6c3dc0 100644 --- a/sdk/messaging/azeventhubs/go.sum +++ b/sdk/messaging/azeventhubs/go.sum @@ -10,6 +10,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.0. 
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.0.0/go.mod h1:Y3gnVwfaz8h6L1YHar+NfWORtBoVUSB5h4GlGkdeF7Q= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= +github.com/Azure/go-amqp v1.0.0 h1:QfCugi1M+4F2JDTRgVnRw7PYXLXZ9hmqk3+9+oJh3OA= +github.com/Azure/go-amqp v1.0.0/go.mod h1:+bg0x3ce5+Q3ahCEXnCsGG3ETpDQe3MEVnOuT2ywPwc= github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/sdk/messaging/azeventhubs/inmemory_checkpoint_store_test.go b/sdk/messaging/azeventhubs/inmemory_checkpoint_store_test.go index ea3aa5677ee6..44aacfa626d0 100644 --- a/sdk/messaging/azeventhubs/inmemory_checkpoint_store_test.go +++ b/sdk/messaging/azeventhubs/inmemory_checkpoint_store_test.go @@ -23,7 +23,7 @@ func Test_InMemoryCheckpointStore_Checkpoints(t *testing.T) { require.Empty(t, checkpoints) for i := int64(0); i < 5; i++ { - err = store.UpdateCheckpoint(context.Background(), Checkpoint{ + err = store.SetCheckpoint(context.Background(), Checkpoint{ FullyQualifiedNamespace: "ns", EventHubName: "eh", ConsumerGroup: "cg", @@ -269,7 +269,7 @@ func (cps *testCheckpointStore) ListOwnership(ctx context.Context, fullyQualifie return ownerships, nil } -func (cps *testCheckpointStore) UpdateCheckpoint(ctx context.Context, checkpoint Checkpoint, options *UpdateCheckpointOptions) error { +func (cps *testCheckpointStore) SetCheckpoint(ctx context.Context, checkpoint Checkpoint, options *SetCheckpointOptions) error { cps.checkpointsMu.Lock() defer 
cps.checkpointsMu.Unlock() diff --git a/sdk/messaging/azeventhubs/internal/amqp_fakes.go b/sdk/messaging/azeventhubs/internal/amqp_fakes.go index 9f354a0f19dd..a64783a7f207 100644 --- a/sdk/messaging/azeventhubs/internal/amqp_fakes.go +++ b/sdk/messaging/azeventhubs/internal/amqp_fakes.go @@ -7,7 +7,7 @@ import ( "context" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) type FakeNSForPartClient struct { diff --git a/sdk/messaging/azeventhubs/internal/amqpwrap/amqpwrap.go b/sdk/messaging/azeventhubs/internal/amqpwrap/amqpwrap.go index dda9958c591e..c1b3524b7ed7 100644 --- a/sdk/messaging/azeventhubs/internal/amqpwrap/amqpwrap.go +++ b/sdk/messaging/azeventhubs/internal/amqpwrap/amqpwrap.go @@ -10,7 +10,7 @@ import ( "errors" "time" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) // AMQPReceiver is implemented by *amqp.Receiver diff --git a/sdk/messaging/azeventhubs/internal/amqpwrap/mock_amqp_test.go b/sdk/messaging/azeventhubs/internal/amqpwrap/mock_amqp_test.go index f32dd94aee1e..6f48c0d524e5 100644 --- a/sdk/messaging/azeventhubs/internal/amqpwrap/mock_amqp_test.go +++ b/sdk/messaging/azeventhubs/internal/amqpwrap/mock_amqp_test.go @@ -12,7 +12,7 @@ import ( context "context" reflect "reflect" - go_amqp "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + go_amqp "github.com/Azure/go-amqp" gomock "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azeventhubs/internal/amqpwrap/rpc.go b/sdk/messaging/azeventhubs/internal/amqpwrap/rpc.go index 12bab2b2fa88..0a7b7a132ff9 100644 --- a/sdk/messaging/azeventhubs/internal/amqpwrap/rpc.go +++ b/sdk/messaging/azeventhubs/internal/amqpwrap/rpc.go @@ -6,7 +6,7 @@ package amqpwrap import ( "context" - 
"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) // RPCResponse is the simplified response structure from an RPC like call diff --git a/sdk/messaging/azeventhubs/internal/cbs.go b/sdk/messaging/azeventhubs/internal/cbs.go index e428ef9056ba..4d41921d35fd 100644 --- a/sdk/messaging/azeventhubs/internal/cbs.go +++ b/sdk/messaging/azeventhubs/internal/cbs.go @@ -11,7 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) const ( diff --git a/sdk/messaging/azeventhubs/internal/cbs_test.go b/sdk/messaging/azeventhubs/internal/cbs_test.go index 13a28559fcff..baa589a96a0e 100644 --- a/sdk/messaging/azeventhubs/internal/cbs_test.go +++ b/sdk/messaging/azeventhubs/internal/cbs_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/mock" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azeventhubs/internal/eh/stress/tests/processor_stress_tester.go b/sdk/messaging/azeventhubs/internal/eh/stress/tests/processor_stress_tester.go index a6d2398cb66a..9e9a48032059 100644 --- a/sdk/messaging/azeventhubs/internal/eh/stress/tests/processor_stress_tester.go +++ b/sdk/messaging/azeventhubs/internal/eh/stress/tests/processor_stress_tester.go @@ -285,7 +285,7 @@ func (inf *processorStressTest) receiveForever(ctx context.Context, 
partClient * if len(events) > 0 { // we're okay, let's update our checkpoint - if err := partClient.UpdateCheckpoint(ctx, events[len(events)-1]); err != nil { + if err := partClient.UpdateCheckpoint(ctx, events[len(events)-1], nil); err != nil { logger("Fatal error updating checkpoint: %s", err) inf.TC.TrackException(err) panic(err) diff --git a/sdk/messaging/azeventhubs/internal/eh/stress/tests/shared.go b/sdk/messaging/azeventhubs/internal/eh/stress/tests/shared.go index 4f291a73c28c..5e5e210c9245 100644 --- a/sdk/messaging/azeventhubs/internal/eh/stress/tests/shared.go +++ b/sdk/messaging/azeventhubs/internal/eh/stress/tests/shared.go @@ -318,7 +318,7 @@ func initCheckpointStore(ctx context.Context, containerName string, testData *st newCheckpoint.SequenceNumber = &partProps.LastEnqueuedSequenceNumber } - if err = cps.UpdateCheckpoint(ctx, newCheckpoint, nil); err != nil { + if err = cps.SetCheckpoint(ctx, newCheckpoint, nil); err != nil { return nil, err } diff --git a/sdk/messaging/azeventhubs/internal/errors.go b/sdk/messaging/azeventhubs/internal/errors.go index c9e011725dfc..71d77e52c7eb 100644 --- a/sdk/messaging/azeventhubs/internal/errors.go +++ b/sdk/messaging/azeventhubs/internal/errors.go @@ -15,7 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) type errNonRetriable struct { diff --git a/sdk/messaging/azeventhubs/internal/errors_test.go b/sdk/messaging/azeventhubs/internal/errors_test.go index e51ad2cab27f..b85fd1f768fb 100644 --- a/sdk/messaging/azeventhubs/internal/errors_test.go +++ b/sdk/messaging/azeventhubs/internal/errors_test.go @@ -12,7 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azidentity" 
"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/LICENSE b/sdk/messaging/azeventhubs/internal/go-amqp/LICENSE deleted file mode 100644 index 387b3e7e0f3b..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ - MIT License - - Copyright (C) 2017 Kale Blankenship - Portions Copyright (C) Microsoft Corporation - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/conn.go b/sdk/messaging/azeventhubs/internal/go-amqp/conn.go deleted file mode 100644 index 519f9d4cc5de..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/conn.go +++ /dev/null @@ -1,1135 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "bytes" - "context" - "crypto/tls" - "errors" - "fmt" - "math" - "net" - "net/url" - "sync" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/bitmap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/shared" -) - -// Default connection options -const ( - defaultIdleTimeout = 1 * time.Minute - defaultMaxFrameSize = 65536 - defaultMaxSessions = 65536 - defaultWriteTimeout = 30 * time.Second -) - -// ConnOptions contains the optional settings for configuring an AMQP connection. -type ConnOptions struct { - // ContainerID sets the container-id to use when opening the connection. - // - // A container ID will be randomly generated if this option is not used. - ContainerID string - - // HostName sets the hostname sent in the AMQP - // Open frame and TLS ServerName (if not otherwise set). 
- HostName string - - // IdleTimeout specifies the maximum period between - // receiving frames from the peer. - // - // Specify a value less than zero to disable idle timeout. - // - // Default: 1 minute (60000000000). - IdleTimeout time.Duration - - // MaxFrameSize sets the maximum frame size that - // the connection will accept. - // - // Must be 512 or greater. - // - // Default: 512. - MaxFrameSize uint32 - - // MaxSessions sets the maximum number of channels. - // The value must be greater than zero. - // - // Default: 65535. - MaxSessions uint16 - - // Properties sets an entry in the connection properties map sent to the server. - Properties map[string]any - - // SASLType contains the specified SASL authentication mechanism. - SASLType SASLType - - // TLSConfig sets the tls.Config to be used during - // TLS negotiation. - // - // This option is for advanced usage, in most scenarios - // providing a URL scheme of "amqps://" is sufficient. - TLSConfig *tls.Config - - // WriteTimeout controls the write deadline when writing AMQP frames to the - // underlying net.Conn and no caller provided context.Context is available or - // the context contains no deadline (e.g. context.Background()). - // The timeout is set per write. - // - // Setting to a value less than zero means no timeout is set, so writes - // defer to the underlying behavior of net.Conn with no write deadline. - // - // Default: 30s - WriteTimeout time.Duration - - // test hook - dialer dialer -} - -// Dial connects to an AMQP server. -// -// If the addr includes a scheme, it must be "amqp", "amqps", or "amqp+ssl". -// If no port is provided, 5672 will be used for "amqp" and 5671 for "amqps" or "amqp+ssl". -// -// If username and password information is not empty it's used as SASL PLAIN -// credentials, equal to passing ConnSASLPlain option. -// -// opts: pass nil to accept the default values. 
-func Dial(ctx context.Context, addr string, opts *ConnOptions) (*Conn, error) { - c, err := dialConn(ctx, addr, opts) - if err != nil { - return nil, err - } - err = c.start(ctx) - if err != nil { - return nil, err - } - return c, nil -} - -// NewConn establishes a new AMQP client connection over conn. -// opts: pass nil to accept the default values. -func NewConn(ctx context.Context, conn net.Conn, opts *ConnOptions) (*Conn, error) { - c, err := newConn(conn, opts) - if err != nil { - return nil, err - } - err = c.start(ctx) - if err != nil { - return nil, err - } - return c, nil -} - -// Conn is an AMQP connection. -type Conn struct { - net net.Conn // underlying connection - dialer dialer // used for testing purposes, it allows faking dialing TCP/TLS endpoints - writeTimeout time.Duration // controls write deadline in absense of a context - - // TLS - tlsNegotiation bool // negotiate TLS - tlsComplete bool // TLS negotiation complete - tlsConfig *tls.Config // TLS config, default used if nil (ServerName set to Client.hostname) - - // SASL - saslHandlers map[encoding.Symbol]stateFunc // map of supported handlers keyed by SASL mechanism, SASL not negotiated if nil - saslComplete bool // SASL negotiation complete; internal *except* for SASL auth methods - - // local settings - maxFrameSize uint32 // max frame size to accept - channelMax uint16 // maximum number of channels to allow - hostname string // hostname of remote server (set explicitly or parsed from URL) - idleTimeout time.Duration // maximum period between receiving frames - properties map[encoding.Symbol]any // additional properties sent upon connection open - containerID string // set explicitly or randomly generated - - // peer settings - peerIdleTimeout time.Duration // maximum period between sending frames - peerMaxFrameSize uint32 // maximum frame size peer will accept - - // conn state - done chan struct{} // indicates the connection has terminated - doneErr error // contains the error state 
returned from Close(); DO NOT TOUCH outside of conn.go until done has been closed! - - // connReader and connWriter management - rxtxExit chan struct{} // signals connReader and connWriter to exit - closeOnce sync.Once // ensures that close() is only called once - - // session tracking - channels *bitmap.Bitmap - sessionsByChannel map[uint16]*Session - sessionsByChannelMu sync.RWMutex - - abandonedSessionsMu sync.Mutex - abandonedSessions []*Session - - // connReader - rxBuf buffer.Buffer // incoming bytes buffer - rxDone chan struct{} // closed when connReader exits - rxErr error // contains last error reading from c.net; DO NOT TOUCH outside of connReader until rxDone has been closed! - - // connWriter - txFrame chan frameEnvelope // AMQP frames to be sent by connWriter - txBuf buffer.Buffer // buffer for marshaling frames before transmitting - txDone chan struct{} // closed when connWriter exits - txErr error // contains last error writing to c.net; DO NOT TOUCH outside of connWriter until txDone has been closed! 
-} - -// used to abstract the underlying dialer for testing purposes -type dialer interface { - NetDialerDial(ctx context.Context, c *Conn, host, port string) error - TLSDialWithDialer(ctx context.Context, c *Conn, host, port string) error -} - -// implements the dialer interface -type defaultDialer struct{} - -func (defaultDialer) NetDialerDial(ctx context.Context, c *Conn, host, port string) (err error) { - dialer := &net.Dialer{} - c.net, err = dialer.DialContext(ctx, "tcp", net.JoinHostPort(host, port)) - return -} - -func (defaultDialer) TLSDialWithDialer(ctx context.Context, c *Conn, host, port string) (err error) { - dialer := &tls.Dialer{Config: c.tlsConfig} - c.net, err = dialer.DialContext(ctx, "tcp", net.JoinHostPort(host, port)) - return -} - -func dialConn(ctx context.Context, addr string, opts *ConnOptions) (*Conn, error) { - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - host, port := u.Hostname(), u.Port() - if port == "" { - port = "5672" - if u.Scheme == "amqps" || u.Scheme == "amqp+ssl" { - port = "5671" - } - } - - var cp ConnOptions - if opts != nil { - cp = *opts - } - - // prepend SASL credentials when the user/pass segment is not empty - if u.User != nil { - pass, _ := u.User.Password() - cp.SASLType = SASLTypePlain(u.User.Username(), pass) - } - - if cp.HostName == "" { - cp.HostName = host - } - - c, err := newConn(nil, &cp) - if err != nil { - return nil, err - } - - switch u.Scheme { - case "amqp", "": - err = c.dialer.NetDialerDial(ctx, c, host, port) - case "amqps", "amqp+ssl": - c.initTLSConfig() - c.tlsNegotiation = false - err = c.dialer.TLSDialWithDialer(ctx, c, host, port) - default: - err = fmt.Errorf("unsupported scheme %q", u.Scheme) - } - - if err != nil { - return nil, err - } - return c, nil -} - -func newConn(netConn net.Conn, opts *ConnOptions) (*Conn, error) { - c := &Conn{ - dialer: defaultDialer{}, - net: netConn, - maxFrameSize: defaultMaxFrameSize, - peerMaxFrameSize: defaultMaxFrameSize, - 
channelMax: defaultMaxSessions - 1, // -1 because channel-max starts at zero - idleTimeout: defaultIdleTimeout, - containerID: shared.RandString(40), - done: make(chan struct{}), - rxtxExit: make(chan struct{}), - rxDone: make(chan struct{}), - txFrame: make(chan frameEnvelope), - txDone: make(chan struct{}), - sessionsByChannel: map[uint16]*Session{}, - writeTimeout: defaultWriteTimeout, - } - - // apply options - if opts == nil { - opts = &ConnOptions{} - } - - if opts.WriteTimeout > 0 { - c.writeTimeout = opts.WriteTimeout - } else if opts.WriteTimeout < 0 { - c.writeTimeout = 0 - } - if opts.ContainerID != "" { - c.containerID = opts.ContainerID - } - if opts.HostName != "" { - c.hostname = opts.HostName - } - if opts.IdleTimeout > 0 { - c.idleTimeout = opts.IdleTimeout - } else if opts.IdleTimeout < 0 { - c.idleTimeout = 0 - } - if opts.MaxFrameSize > 0 && opts.MaxFrameSize < 512 { - return nil, fmt.Errorf("invalid MaxFrameSize value %d", opts.MaxFrameSize) - } else if opts.MaxFrameSize > 512 { - c.maxFrameSize = opts.MaxFrameSize - } - if opts.MaxSessions > 0 { - c.channelMax = opts.MaxSessions - } - if opts.SASLType != nil { - if err := opts.SASLType(c); err != nil { - return nil, err - } - } - if opts.Properties != nil { - c.properties = make(map[encoding.Symbol]any) - for key, val := range opts.Properties { - c.properties[encoding.Symbol(key)] = val - } - } - if opts.TLSConfig != nil { - c.tlsConfig = opts.TLSConfig.Clone() - } - if opts.dialer != nil { - c.dialer = opts.dialer - } - return c, nil -} - -func (c *Conn) initTLSConfig() { - // create a new config if not already set - if c.tlsConfig == nil { - c.tlsConfig = new(tls.Config) - } - - // TLS config must have ServerName or InsecureSkipVerify set - if c.tlsConfig.ServerName == "" && !c.tlsConfig.InsecureSkipVerify { - c.tlsConfig.ServerName = c.hostname - } -} - -// start establishes the connection and begins multiplexing network IO. 
-// It is an error to call Start() on a connection that's been closed. -func (c *Conn) start(ctx context.Context) (err error) { - // if the context has a deadline or is cancellable, start the interruptor goroutine. - // this will close the underlying net.Conn in response to the context. - - if ctx.Done() != nil { - done := make(chan struct{}) - interruptRes := make(chan error, 1) - - defer func() { - close(done) - if ctxErr := <-interruptRes; ctxErr != nil { - // return context error to caller - err = ctxErr - } - }() - - go func() { - select { - case <-ctx.Done(): - c.closeDuringStart() - interruptRes <- ctx.Err() - case <-done: - interruptRes <- nil - } - }() - } - - if err = c.startImpl(ctx); err != nil { - return err - } - - // we can't create the channel bitmap until the connection has been established. - // this is because our peer can tell us the max channels they support. - c.channels = bitmap.New(uint32(c.channelMax)) - - go c.connWriter() - go c.connReader() - - return -} - -func (c *Conn) startImpl(ctx context.Context) error { - // set connection establishment deadline as required - if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { - _ = c.net.SetDeadline(deadline) - - // remove connection establishment deadline - defer func() { - _ = c.net.SetDeadline(time.Time{}) - }() - } - - // run connection establishment state machine - for state := c.negotiateProto; state != nil; { - var err error - state, err = state(ctx) - // check if err occurred - if err != nil { - c.closeDuringStart() - return err - } - } - - return nil -} - -// Close closes the connection. -func (c *Conn) Close() error { - c.close() - - // wait until the reader/writer goroutines have exited before proceeding. - // this is to prevent a race between calling Close() and a reader/writer - // goroutine calling close() due to a terminal error. 
- <-c.txDone - <-c.rxDone - - var connErr *ConnError - if errors.As(c.doneErr, &connErr) && connErr.RemoteErr == nil && connErr.inner == nil { - // an empty ConnectionError means the connection was closed by the caller - return nil - } - - // there was an error during shut-down or connReader/connWriter - // experienced a terminal error - return c.doneErr -} - -// close is called once, either from Close() or when connReader/connWriter exits -func (c *Conn) close() { - c.closeOnce.Do(func() { - defer close(c.done) - - close(c.rxtxExit) - - // wait for writing to stop, allows it to send the final close frame - <-c.txDone - - closeErr := c.net.Close() - - // check rxDone after closing net, otherwise may block - // for up to c.idleTimeout - <-c.rxDone - - if errors.Is(c.rxErr, net.ErrClosed) { - // this is the expected error when the connection is closed, swallow it - c.rxErr = nil - } - - if c.txErr == nil && c.rxErr == nil && closeErr == nil { - // if there are no errors, it means user initiated close() and we shut down cleanly - c.doneErr = &ConnError{} - } else if amqpErr, ok := c.rxErr.(*Error); ok { - // we experienced a peer-initiated close that contained an Error. return it - c.doneErr = &ConnError{RemoteErr: amqpErr} - } else if c.txErr != nil { - // c.txErr is already wrapped in a ConnError - c.doneErr = c.txErr - } else if c.rxErr != nil { - c.doneErr = &ConnError{inner: c.rxErr} - } else { - c.doneErr = &ConnError{inner: closeErr} - } - }) -} - -// closeDuringStart is a special close to be used only during startup (i.e. c.start() and any of its children) -func (c *Conn) closeDuringStart() { - c.closeOnce.Do(func() { - c.net.Close() - }) -} - -// NewSession starts a new session on the connection. -// - ctx controls waiting for the peer to acknowledge the session -// - opts contains optional values, pass nil to accept the defaults -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. 
If the Session was successfully -// created, it will be cleaned up in future calls to NewSession. -func (c *Conn) NewSession(ctx context.Context, opts *SessionOptions) (*Session, error) { - // clean up any abandoned sessions first - if err := c.freeAbandonedSessions(ctx); err != nil { - return nil, err - } - - session, err := c.newSession(opts) - if err != nil { - return nil, err - } - - if err := session.begin(ctx); err != nil { - c.abandonSession(session) - return nil, err - } - - return session, nil -} - -func (c *Conn) freeAbandonedSessions(ctx context.Context) error { - c.abandonedSessionsMu.Lock() - defer c.abandonedSessionsMu.Unlock() - - debug.Log(3, "TX (Conn %p): cleaning up %d abandoned sessions", c, len(c.abandonedSessions)) - - for _, s := range c.abandonedSessions { - fr := frames.PerformEnd{} - if err := s.txFrameAndWait(ctx, &fr); err != nil { - return err - } - } - - c.abandonedSessions = nil - return nil -} - -func (c *Conn) newSession(opts *SessionOptions) (*Session, error) { - c.sessionsByChannelMu.Lock() - defer c.sessionsByChannelMu.Unlock() - - // create the next session to allocate - // note that channel always start at 0 - channel, ok := c.channels.Next() - if !ok { - if err := c.Close(); err != nil { - return nil, err - } - return nil, &ConnError{inner: fmt.Errorf("reached connection channel max (%d)", c.channelMax)} - } - session := newSession(c, uint16(channel), opts) - c.sessionsByChannel[session.channel] = session - - return session, nil -} - -func (c *Conn) deleteSession(s *Session) { - c.sessionsByChannelMu.Lock() - defer c.sessionsByChannelMu.Unlock() - - delete(c.sessionsByChannel, s.channel) - c.channels.Remove(uint32(s.channel)) -} - -func (c *Conn) abandonSession(s *Session) { - c.abandonedSessionsMu.Lock() - defer c.abandonedSessionsMu.Unlock() - c.abandonedSessions = append(c.abandonedSessions, s) -} - -// connReader reads from the net.Conn, decodes frames, and either handles -// them here as appropriate or sends them to the 
session.rx channel. -func (c *Conn) connReader() { - defer func() { - close(c.rxDone) - c.close() - }() - - var sessionsByRemoteChannel = make(map[uint16]*Session) - var err error - for { - if err != nil { - debug.Log(1, "RX (connReader %p): terminal error: %v", c, err) - c.rxErr = err - return - } - - var fr frames.Frame - fr, err = c.readFrame() - if err != nil { - continue - } - - debug.Log(1, "RX (connReader %p): %s", c, fr) - - var ( - session *Session - ok bool - ) - - switch body := fr.Body.(type) { - // Server initiated close. - case *frames.PerformClose: - // connWriter will send the close performative ack on its way out. - // it's a SHOULD though, not a MUST. - if body.Error == nil { - return - } - err = body.Error - continue - - // RemoteChannel should be used when frame is Begin - case *frames.PerformBegin: - if body.RemoteChannel == nil { - // since we only support remotely-initiated sessions, this is an error - // TODO: it would be ideal to not have this kill the connection - err = fmt.Errorf("%T: nil RemoteChannel", fr.Body) - continue - } - c.sessionsByChannelMu.RLock() - session, ok = c.sessionsByChannel[*body.RemoteChannel] - c.sessionsByChannelMu.RUnlock() - if !ok { - // this can happen if NewSession() exits due to the context expiring/cancelled - // before the begin ack is received. - err = fmt.Errorf("unexpected remote channel number %d", *body.RemoteChannel) - continue - } - - session.remoteChannel = fr.Channel - sessionsByRemoteChannel[fr.Channel] = session - - case *frames.PerformEnd: - session, ok = sessionsByRemoteChannel[fr.Channel] - if !ok { - err = fmt.Errorf("%T: didn't find channel %d in sessionsByRemoteChannel (PerformEnd)", fr.Body, fr.Channel) - continue - } - // we MUST remove the remote channel from our map as soon as we receive - // the ack (i.e. before passing it on to the session mux) on the session - // ending since the numbers are recycled. 
- delete(sessionsByRemoteChannel, fr.Channel) - c.deleteSession(session) - - default: - // pass on performative to the correct session - session, ok = sessionsByRemoteChannel[fr.Channel] - if !ok { - err = fmt.Errorf("%T: didn't find channel %d in sessionsByRemoteChannel", fr.Body, fr.Channel) - continue - } - } - - q := session.rxQ.Acquire() - q.Enqueue(fr.Body) - session.rxQ.Release(q) - debug.Log(2, "RX (connReader %p): mux frame to Session (%p): %s", c, session, fr) - } -} - -// readFrame reads a complete frame from c.net. -// it assumes that any read deadline has already been applied. -// used externally by SASL only. -func (c *Conn) readFrame() (frames.Frame, error) { - switch { - // Cheaply reuse free buffer space when fully read. - case c.rxBuf.Len() == 0: - c.rxBuf.Reset() - - // Prevent excessive/unbounded growth by shifting data to beginning of buffer. - case int64(c.rxBuf.Size()) > int64(c.maxFrameSize): - c.rxBuf.Reclaim() - } - - var ( - currentHeader frames.Header // keep track of the current header, for frames split across multiple TCP packets - frameInProgress bool // true if in the middle of receiving data for currentHeader - ) - - for { - // need to read more if buf doesn't contain the complete frame - // or there's not enough in buf to parse the header - if frameInProgress || c.rxBuf.Len() < frames.HeaderSize { - // we MUST reset the idle timeout before each read from net.Conn - if c.idleTimeout > 0 { - _ = c.net.SetReadDeadline(time.Now().Add(c.idleTimeout)) - } - err := c.rxBuf.ReadFromOnce(c.net) - if err != nil { - return frames.Frame{}, err - } - } - - // read more if buf doesn't contain enough to parse the header - if c.rxBuf.Len() < frames.HeaderSize { - continue - } - - // parse the header if a frame isn't in progress - if !frameInProgress { - var err error - currentHeader, err = frames.ParseHeader(&c.rxBuf) - if err != nil { - return frames.Frame{}, err - } - frameInProgress = true - } - - // check size is reasonable - if 
currentHeader.Size > math.MaxInt32 { // make max size configurable - return frames.Frame{}, errors.New("payload too large") - } - - bodySize := int64(currentHeader.Size - frames.HeaderSize) - - // the full frame hasn't been received, keep reading - if int64(c.rxBuf.Len()) < bodySize { - continue - } - frameInProgress = false - - // check if body is empty (keepalive) - if bodySize == 0 { - debug.Log(3, "RX (connReader %p): received keep-alive frame", c) - continue - } - - // parse the frame - b, ok := c.rxBuf.Next(bodySize) - if !ok { - return frames.Frame{}, fmt.Errorf("buffer EOF; requested bytes: %d, actual size: %d", bodySize, c.rxBuf.Len()) - } - - parsedBody, err := frames.ParseBody(buffer.New(b)) - if err != nil { - return frames.Frame{}, err - } - - return frames.Frame{Channel: currentHeader.Channel, Body: parsedBody}, nil - } -} - -// frameEnvelope is used when sending a frame to connWriter to be written to net.Conn -type frameEnvelope struct { - Ctx context.Context - Frame frames.Frame - - // optional channel that is closed on successful write to net.Conn or contains the write error - // NOTE: use a buffered channel of size 1 when populating - Sent chan error -} - -func (c *Conn) connWriter() { - defer func() { - close(c.txDone) - c.close() - }() - - var ( - // keepalives are sent at a rate of 1/2 idle timeout - keepaliveInterval = c.peerIdleTimeout / 2 - // 0 disables keepalives - keepalivesEnabled = keepaliveInterval > 0 - // set if enable, nil if not; nil channels block forever - keepalive <-chan time.Time - ) - - if keepalivesEnabled { - ticker := time.NewTicker(keepaliveInterval) - defer ticker.Stop() - keepalive = ticker.C - } - - var err error - for { - if err != nil { - debug.Log(1, "TX (connWriter %p): terminal error: %v", c, err) - c.txErr = err - return - } - - select { - // frame write request - case env := <-c.txFrame: - timeout, ctxErr := c.getWriteTimeout(env.Ctx) - if ctxErr != nil { - debug.Log(1, "TX (connWriter %p) deadline exceeded: 
%s", c, env.Frame) - if env.Sent != nil { - env.Sent <- ctxErr - } - continue - } - - debug.Log(1, "TX (connWriter %p) timeout %s: %s", c, timeout, env.Frame) - err = c.writeFrame(timeout, env.Frame) - if env.Sent != nil { - if err == nil { - close(env.Sent) - } else { - env.Sent <- err - } - } - - // keepalive timer - case <-keepalive: - debug.Log(3, "TX (connWriter %p): sending keep-alive frame", c) - _ = c.net.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - if _, err = c.net.Write(keepaliveFrame); err != nil { - err = &ConnError{inner: err} - } - // It would be slightly more efficient in terms of network - // resources to reset the timer each time a frame is sent. - // However, keepalives are small (8 bytes) and the interval - // is usually on the order of minutes. It does not seem - // worth it to add extra operations in the write path to - // avoid. (To properly reset a timer it needs to be stopped, - // possibly drained, then reset.) - - // connection complete - case <-c.rxtxExit: - // send close performative. note that the spec says we - // SHOULD wait for the ack but we don't HAVE to, in order - // to be resilient to bad actors etc. so we just send - // the close performative and exit. - fr := frames.Frame{ - Type: frames.TypeAMQP, - Body: &frames.PerformClose{}, - } - debug.Log(1, "TX (connWriter %p): %s", c, fr) - c.txErr = c.writeFrame(c.writeTimeout, fr) - return - } - } -} - -// writeFrame writes a frame to the network. -// used externally by SASL only. -// - timeout - the write deadline to set. zero means no deadline -// -// errors are wrapped in a ConnError as they can be returned to outside callers. 
-func (c *Conn) writeFrame(timeout time.Duration, fr frames.Frame) error { - // writeFrame into txBuf - c.txBuf.Reset() - err := frames.Write(&c.txBuf, fr) - if err != nil { - return &ConnError{inner: err} - } - - // validate the frame isn't exceeding peer's max frame size - requiredFrameSize := c.txBuf.Len() - if uint64(requiredFrameSize) > uint64(c.peerMaxFrameSize) { - return &ConnError{inner: fmt.Errorf("%T frame size %d larger than peer's max frame size %d", fr, requiredFrameSize, c.peerMaxFrameSize)} - } - - if timeout == 0 { - _ = c.net.SetWriteDeadline(time.Time{}) - } else if timeout > 0 { - _ = c.net.SetWriteDeadline(time.Now().Add(timeout)) - } - - // write to network - n, err := c.net.Write(c.txBuf.Bytes()) - if l := c.txBuf.Len(); n > 0 && n < l && err != nil { - debug.Log(1, "TX (writeFrame %p): wrote %d bytes less than len %d: %v", c, n, l, err) - } - if err != nil { - err = &ConnError{inner: err} - } - return err -} - -// writeProtoHeader writes an AMQP protocol header to the -// network -func (c *Conn) writeProtoHeader(pID protoID) error { - _, err := c.net.Write([]byte{'A', 'M', 'Q', 'P', byte(pID), 1, 0, 0}) - return err -} - -// keepaliveFrame is an AMQP frame with no body, used for keepalives -var keepaliveFrame = []byte{0x00, 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00} - -// SendFrame is used by sessions and links to send frames across the network. -// - ctx is used to provide the write deadline -// - fr is the frame to write to net.Conn -// - sent is the optional channel that will contain the error if the write fails -func (c *Conn) sendFrame(ctx context.Context, fr frames.Frame, sent chan error) { - select { - case c.txFrame <- frameEnvelope{Ctx: ctx, Frame: fr, Sent: sent}: - debug.Log(2, "TX (Conn %p): mux frame to connWriter: %s", c, fr) - case <-c.done: - if sent != nil { - sent <- c.doneErr - } - } -} - -// stateFunc is a state in a state machine. -// -// The state is advanced by returning the next state. 
-// The state machine concludes when nil is returned. -type stateFunc func(context.Context) (stateFunc, error) - -// negotiateProto determines which proto to negotiate next. -// used externally by SASL only. -func (c *Conn) negotiateProto(ctx context.Context) (stateFunc, error) { - // in the order each must be negotiated - switch { - case c.tlsNegotiation && !c.tlsComplete: - return c.exchangeProtoHeader(protoTLS) - case c.saslHandlers != nil && !c.saslComplete: - return c.exchangeProtoHeader(protoSASL) - default: - return c.exchangeProtoHeader(protoAMQP) - } -} - -type protoID uint8 - -// protocol IDs received in protoHeaders -const ( - protoAMQP protoID = 0x0 - protoTLS protoID = 0x2 - protoSASL protoID = 0x3 -) - -// exchangeProtoHeader performs the round trip exchange of protocol -// headers, validation, and returns the protoID specific next state. -func (c *Conn) exchangeProtoHeader(pID protoID) (stateFunc, error) { - // write the proto header - if err := c.writeProtoHeader(pID); err != nil { - return nil, err - } - - // read response header - p, err := c.readProtoHeader() - if err != nil { - return nil, err - } - - if pID != p.ProtoID { - return nil, fmt.Errorf("unexpected protocol header %#00x, expected %#00x", p.ProtoID, pID) - } - - // go to the proto specific state - switch pID { - case protoAMQP: - return c.openAMQP, nil - case protoTLS: - return c.startTLS, nil - case protoSASL: - return c.negotiateSASL, nil - default: - return nil, fmt.Errorf("unknown protocol ID %#02x", p.ProtoID) - } -} - -// readProtoHeader reads a protocol header packet from c.rxProto. -func (c *Conn) readProtoHeader() (protoHeader, error) { - const protoHeaderSize = 8 - - // only read from the network once our buffer has been exhausted. - // TODO: this preserves existing behavior as some tests rely on this - // implementation detail (it lets you replay a stream of bytes). 
we - // might want to consider removing this and fixing the tests as the - // protocol doesn't actually work this way. - if c.rxBuf.Len() == 0 { - for { - err := c.rxBuf.ReadFromOnce(c.net) - if err != nil { - return protoHeader{}, err - } - - // read more if buf doesn't contain enough to parse the header - if c.rxBuf.Len() >= protoHeaderSize { - break - } - } - } - - buf, ok := c.rxBuf.Next(protoHeaderSize) - if !ok { - return protoHeader{}, errors.New("invalid protoHeader") - } - // bounds check hint to compiler; see golang.org/issue/14808 - _ = buf[protoHeaderSize-1] - - if !bytes.Equal(buf[:4], []byte{'A', 'M', 'Q', 'P'}) { - return protoHeader{}, fmt.Errorf("unexpected protocol %q", buf[:4]) - } - - p := protoHeader{ - ProtoID: protoID(buf[4]), - Major: buf[5], - Minor: buf[6], - Revision: buf[7], - } - - if p.Major != 1 || p.Minor != 0 || p.Revision != 0 { - return protoHeader{}, fmt.Errorf("unexpected protocol version %d.%d.%d", p.Major, p.Minor, p.Revision) - } - - return p, nil -} - -// startTLS wraps the conn with TLS and returns to Client.negotiateProto -func (c *Conn) startTLS(ctx context.Context) (stateFunc, error) { - c.initTLSConfig() - - _ = c.net.SetReadDeadline(time.Time{}) // clear timeout - - // wrap existing net.Conn and perform TLS handshake - tlsConn := tls.Client(c.net, c.tlsConfig) - if err := tlsConn.HandshakeContext(ctx); err != nil { - return nil, err - } - - // swap net.Conn - c.net = tlsConn - c.tlsComplete = true - - // go to next protocol - return c.negotiateProto, nil -} - -// openAMQP round trips the AMQP open performative -func (c *Conn) openAMQP(ctx context.Context) (stateFunc, error) { - // send open frame - open := &frames.PerformOpen{ - ContainerID: c.containerID, - Hostname: c.hostname, - MaxFrameSize: c.maxFrameSize, - ChannelMax: c.channelMax, - IdleTimeout: c.idleTimeout / 2, // per spec, advertise half our idle timeout - Properties: c.properties, - } - fr := frames.Frame{ - Type: frames.TypeAMQP, - Body: open, - Channel: 
0, - } - debug.Log(1, "TX (openAMQP %p): %s", c, fr) - timeout, err := c.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - if err = c.writeFrame(timeout, fr); err != nil { - return nil, err - } - - // get the response - fr, err = c.readSingleFrame() - if err != nil { - return nil, err - } - debug.Log(1, "RX (openAMQP %p): %s", c, fr) - o, ok := fr.Body.(*frames.PerformOpen) - if !ok { - return nil, fmt.Errorf("openAMQP: unexpected frame type %T", fr.Body) - } - - // update peer settings - if o.MaxFrameSize > 0 { - c.peerMaxFrameSize = o.MaxFrameSize - } - if o.IdleTimeout > 0 { - // TODO: reject very small idle timeouts - c.peerIdleTimeout = o.IdleTimeout - } - if o.ChannelMax < c.channelMax { - c.channelMax = o.ChannelMax - } - - // connection established, exit state machine - return nil, nil -} - -// negotiateSASL returns the SASL handler for the first matched -// mechanism specified by the server -func (c *Conn) negotiateSASL(context.Context) (stateFunc, error) { - // read mechanisms frame - fr, err := c.readSingleFrame() - if err != nil { - return nil, err - } - debug.Log(1, "RX (negotiateSASL %p): %s", c, fr) - sm, ok := fr.Body.(*frames.SASLMechanisms) - if !ok { - return nil, fmt.Errorf("negotiateSASL: unexpected frame type %T", fr.Body) - } - - // return first match in c.saslHandlers based on order received - for _, mech := range sm.Mechanisms { - if state, ok := c.saslHandlers[mech]; ok { - return state, nil - } - } - - // no match - return nil, fmt.Errorf("no supported auth mechanism (%v)", sm.Mechanisms) // TODO: send "auth not supported" frame? -} - -// saslOutcome processes the SASL outcome frame and return Client.negotiateProto -// on success. -// -// SASL handlers return this stateFunc when the mechanism specific negotiation -// has completed. -// used externally by SASL only. 
-func (c *Conn) saslOutcome(context.Context) (stateFunc, error) { - // read outcome frame - fr, err := c.readSingleFrame() - if err != nil { - return nil, err - } - debug.Log(1, "RX (saslOutcome %p): %s", c, fr) - so, ok := fr.Body.(*frames.SASLOutcome) - if !ok { - return nil, fmt.Errorf("saslOutcome: unexpected frame type %T", fr.Body) - } - - // check if auth succeeded - if so.Code != encoding.CodeSASLOK { - return nil, fmt.Errorf("SASL PLAIN auth failed with code %#00x: %s", so.Code, so.AdditionalData) // implement Stringer for so.Code - } - - // return to c.negotiateProto - c.saslComplete = true - return c.negotiateProto, nil -} - -// readSingleFrame is used during connection establishment to read a single frame. -// -// After setup, conn.connReader handles incoming frames. -func (c *Conn) readSingleFrame() (frames.Frame, error) { - fr, err := c.readFrame() - if err != nil { - return frames.Frame{}, err - } - - return fr, nil -} - -// getWriteTimeout returns the timeout as calculated from the context's deadline -// or the default write timeout if the context has no deadline. -// if the context has timed out or was cancelled, an error is returned. 
-func (c *Conn) getWriteTimeout(ctx context.Context) (time.Duration, error) { - if deadline, ok := ctx.Deadline(); ok { - until := time.Until(deadline) - if until <= 0 { - return 0, context.DeadlineExceeded - } - return until, nil - } - return c.writeTimeout, nil -} - -type protoHeader struct { - ProtoID protoID - Major uint8 - Minor uint8 - Revision uint8 -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/const.go b/sdk/messaging/azeventhubs/internal/go-amqp/const.go deleted file mode 100644 index fee0b5041525..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/const.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" - -// Sender Settlement Modes -const ( - // Sender will send all deliveries initially unsettled to the receiver. - SenderSettleModeUnsettled SenderSettleMode = encoding.SenderSettleModeUnsettled - - // Sender will send all deliveries settled to the receiver. - SenderSettleModeSettled SenderSettleMode = encoding.SenderSettleModeSettled - - // Sender MAY send a mixture of settled and unsettled deliveries to the receiver. - SenderSettleModeMixed SenderSettleMode = encoding.SenderSettleModeMixed -) - -// SenderSettleMode specifies how the sender will settle messages. -type SenderSettleMode = encoding.SenderSettleMode - -func senderSettleModeValue(m *SenderSettleMode) SenderSettleMode { - if m == nil { - return SenderSettleModeMixed - } - return *m -} - -// Receiver Settlement Modes -const ( - // Receiver is the first to consider the message as settled. - // Once the corresponding disposition frame is sent, the message - // is considered to be settled. - ReceiverSettleModeFirst ReceiverSettleMode = encoding.ReceiverSettleModeFirst - - // Receiver is the second to consider the message as settled. 
- // Once the corresponding disposition frame is sent, the settlement - // is considered in-flight and the message will not be considered as - // settled until the sender replies acknowledging the settlement. - ReceiverSettleModeSecond ReceiverSettleMode = encoding.ReceiverSettleModeSecond -) - -// ReceiverSettleMode specifies how the receiver will settle messages. -type ReceiverSettleMode = encoding.ReceiverSettleMode - -func receiverSettleModeValue(m *ReceiverSettleMode) ReceiverSettleMode { - if m == nil { - return ReceiverSettleModeFirst - } - return *m -} - -// Durability Policies -const ( - // No terminus state is retained durably. - DurabilityNone Durability = encoding.DurabilityNone - - // Only the existence and configuration of the terminus is - // retained durably. - DurabilityConfiguration Durability = encoding.DurabilityConfiguration - - // In addition to the existence and configuration of the - // terminus, the unsettled state for durable messages is - // retained durably. - DurabilityUnsettledState Durability = encoding.DurabilityUnsettledState -) - -// Durability specifies the durability of a link. -type Durability = encoding.Durability - -// Expiry Policies -const ( - // The expiry timer starts when terminus is detached. - ExpiryPolicyLinkDetach ExpiryPolicy = encoding.ExpiryLinkDetach - - // The expiry timer starts when the most recently - // associated session is ended. - ExpiryPolicySessionEnd ExpiryPolicy = encoding.ExpirySessionEnd - - // The expiry timer starts when most recently associated - // connection is closed. - ExpiryPolicyConnectionClose ExpiryPolicy = encoding.ExpiryConnectionClose - - // The terminus never expires. - ExpiryPolicyNever ExpiryPolicy = encoding.ExpiryNever -) - -// ExpiryPolicy specifies when the expiry timer of a terminus -// starts counting down from the timeout value. -// -// If the link is subsequently re-attached before the terminus is expired, -// then the count down is aborted. 
If the conditions for the -// terminus-expiry-policy are subsequently re-met, the expiry timer restarts -// from its originally configured timeout value. -type ExpiryPolicy = encoding.ExpiryPolicy diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/creditor.go b/sdk/messaging/azeventhubs/internal/go-amqp/creditor.go deleted file mode 100644 index 184702bca7d2..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/creditor.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "errors" - "sync" -) - -type creditor struct { - mu sync.Mutex - - // future values for the next flow frame. - pendingDrain bool - creditsToAdd uint32 - - // drained is set when a drain is active and we're waiting - // for the corresponding flow from the remote. - drained chan struct{} -} - -var ( - errLinkDraining = errors.New("link is currently draining, no credits can be added") - errAlreadyDraining = errors.New("drain already in process") -) - -// EndDrain ends the current drain, unblocking any active Drain calls. -func (mc *creditor) EndDrain() { - mc.mu.Lock() - defer mc.mu.Unlock() - - if mc.drained != nil { - close(mc.drained) - mc.drained = nil - } -} - -// FlowBits gets gets the proper values for the next flow frame -// and resets the internal state. -// Returns: -// -// (drain: true, credits: 0) if a flow is needed (drain) -// (drain: false, credits > 0) if a flow is needed (issue credit) -// (drain: false, credits == 0) if no flow needed. 
-func (mc *creditor) FlowBits(currentCredits uint32) (bool, uint32) { - mc.mu.Lock() - defer mc.mu.Unlock() - - drain := mc.pendingDrain - var credits uint32 - - if mc.pendingDrain { - // only send one drain request - mc.pendingDrain = false - } - - // either: - // drain is true (ie, we're going to send a drain frame, and the credits for it should be 0) - // mc.creditsToAdd == 0 (no flow frame needed, no new credits are being issued) - if drain || mc.creditsToAdd == 0 { - credits = 0 - } else { - credits = mc.creditsToAdd + currentCredits - } - - mc.creditsToAdd = 0 - - return drain, credits -} - -// Drain initiates a drain and blocks until EndDrain is called. -// If the context's deadline expires or is cancelled before the operation -// completes, the drain might not have happened. -func (mc *creditor) Drain(ctx context.Context, r *Receiver) error { - mc.mu.Lock() - - if mc.drained != nil { - mc.mu.Unlock() - return errAlreadyDraining - } - - mc.drained = make(chan struct{}) - // use a local copy to avoid racing with EndDrain() - drained := mc.drained - mc.pendingDrain = true - - mc.mu.Unlock() - - // cause mux() to check our flow conditions. 
- select { - case r.receiverReady <- struct{}{}: - default: - } - - // send drain, wait for responding flow frame - select { - case <-drained: - return nil - case <-r.l.done: - return r.l.doneErr - case <-ctx.Done(): - return ctx.Err() - } -} - -// IssueCredit queues up additional credits to be requested at the next -// call of FlowBits() -func (mc *creditor) IssueCredit(credits uint32) error { - mc.mu.Lock() - defer mc.mu.Unlock() - - if mc.drained != nil { - return errLinkDraining - } - - mc.creditsToAdd += credits - return nil -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/errors.go b/sdk/messaging/azeventhubs/internal/go-amqp/errors.go deleted file mode 100644 index 515a7c36bca3..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/errors.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" -) - -// ErrCond is an AMQP defined error condition. -// See http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-transport-v1.0-os.html#type-amqp-error for info on their meaning. 
-type ErrCond = encoding.ErrCond - -// Error Conditions -const ( - // AMQP Errors - ErrCondDecodeError ErrCond = "amqp:decode-error" - ErrCondFrameSizeTooSmall ErrCond = "amqp:frame-size-too-small" - ErrCondIllegalState ErrCond = "amqp:illegal-state" - ErrCondInternalError ErrCond = "amqp:internal-error" - ErrCondInvalidField ErrCond = "amqp:invalid-field" - ErrCondNotAllowed ErrCond = "amqp:not-allowed" - ErrCondNotFound ErrCond = "amqp:not-found" - ErrCondNotImplemented ErrCond = "amqp:not-implemented" - ErrCondPreconditionFailed ErrCond = "amqp:precondition-failed" - ErrCondResourceDeleted ErrCond = "amqp:resource-deleted" - ErrCondResourceLimitExceeded ErrCond = "amqp:resource-limit-exceeded" - ErrCondResourceLocked ErrCond = "amqp:resource-locked" - ErrCondUnauthorizedAccess ErrCond = "amqp:unauthorized-access" - - // Connection Errors - ErrCondConnectionForced ErrCond = "amqp:connection:forced" - ErrCondConnectionRedirect ErrCond = "amqp:connection:redirect" - ErrCondFramingError ErrCond = "amqp:connection:framing-error" - - // Session Errors - ErrCondErrantLink ErrCond = "amqp:session:errant-link" - ErrCondHandleInUse ErrCond = "amqp:session:handle-in-use" - ErrCondUnattachedHandle ErrCond = "amqp:session:unattached-handle" - ErrCondWindowViolation ErrCond = "amqp:session:window-violation" - - // Link Errors - ErrCondDetachForced ErrCond = "amqp:link:detach-forced" - ErrCondLinkRedirect ErrCond = "amqp:link:redirect" - ErrCondMessageSizeExceeded ErrCond = "amqp:link:message-size-exceeded" - ErrCondStolen ErrCond = "amqp:link:stolen" - ErrCondTransferLimitExceeded ErrCond = "amqp:link:transfer-limit-exceeded" -) - -// Error is an AMQP error. -type Error = encoding.Error - -// LinkError is returned by methods on Sender/Receiver when the link has closed. -type LinkError struct { - // RemoteErr contains any error information provided by the peer if the peer detached the link. 
- RemoteErr *Error - - inner error -} - -// Error implements the error interface for LinkError. -func (e *LinkError) Error() string { - if e.RemoteErr == nil && e.inner == nil { - return "amqp: link closed" - } else if e.RemoteErr != nil { - return e.RemoteErr.Error() - } - return e.inner.Error() -} - -// ConnError is returned by methods on Conn and propagated to Session and Senders/Receivers -// when the connection has been closed. -type ConnError struct { - // RemoteErr contains any error information provided by the peer if the peer closed the AMQP connection. - RemoteErr *Error - - inner error -} - -// Error implements the error interface for ConnectionError. -func (e *ConnError) Error() string { - if e.RemoteErr == nil && e.inner == nil { - return "amqp: connection closed" - } else if e.RemoteErr != nil { - return e.RemoteErr.Error() - } - return e.inner.Error() -} - -// SessionError is returned by methods on Session and propagated to Senders/Receivers -// when the session has been closed. -type SessionError struct { - // RemoteErr contains any error information provided by the peer if the peer closed the session. - RemoteErr *Error - - inner error -} - -// Error implements the error interface for SessionError. 
-func (e *SessionError) Error() string { - if e.RemoteErr == nil && e.inner == nil { - return "amqp: session closed" - } else if e.RemoteErr != nil { - return e.RemoteErr.Error() - } - return e.inner.Error() -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/bitmap/bitmap.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/bitmap/bitmap.go deleted file mode 100644 index d4d682e9199e..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/bitmap/bitmap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package bitmap - -import ( - "math/bits" -) - -// bitmap is a lazily initialized bitmap -type Bitmap struct { - max uint32 - bits []uint64 -} - -func New(max uint32) *Bitmap { - return &Bitmap{max: max} -} - -// add sets n in the bitmap. -// -// bits will be expanded as needed. -// -// If n is greater than max, the call has no effect. -func (b *Bitmap) Add(n uint32) { - if n > b.max { - return - } - - var ( - idx = n / 64 - offset = n % 64 - ) - - if l := len(b.bits); int(idx) >= l { - b.bits = append(b.bits, make([]uint64, int(idx)-l+1)...) - } - - b.bits[idx] |= 1 << offset -} - -// remove clears n from the bitmap. -// -// If n is not set or greater than max the call has not effect. -func (b *Bitmap) Remove(n uint32) { - var ( - idx = n / 64 - offset = n % 64 - ) - - if int(idx) >= len(b.bits) { - return - } - - b.bits[idx] &= ^uint64(1 << offset) -} - -// next sets and returns the lowest unset bit in the bitmap. -// -// bits will be expanded if necessary. -// -// If there are no unset bits below max, the second return -// value will be false. 
-func (b *Bitmap) Next() (uint32, bool) { - // find the first unset bit - for i, v := range b.bits { - // skip if all bits are set - if v == ^uint64(0) { - continue - } - - var ( - offset = bits.TrailingZeros64(^v) // invert and count zeroes - next = uint32(i*64 + offset) - ) - - // check if in bounds - if next > b.max { - return next, false - } - - // set bit - b.bits[i] |= 1 << uint32(offset) - return next, true - } - - // no unset bits in the current slice, - // check if the full range has been allocated - if uint64(len(b.bits)*64) > uint64(b.max) { - return 0, false - } - - // full range not allocated, append entry with first - // bit set - b.bits = append(b.bits, 1) - - // return the value of the first bit - return uint32(len(b.bits)-1) * 64, true -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer/buffer.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer/buffer.go deleted file mode 100644 index b82e5fab76a6..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer/buffer.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package buffer - -import ( - "encoding/binary" - "io" -) - -// buffer is similar to bytes.Buffer but specialized for this package -type Buffer struct { - b []byte - i int -} - -func New(b []byte) *Buffer { - return &Buffer{b: b} -} - -func (b *Buffer) Next(n int64) ([]byte, bool) { - if b.readCheck(n) { - buf := b.b[b.i:len(b.b)] - b.i = len(b.b) - return buf, false - } - - buf := b.b[b.i : b.i+int(n)] - b.i += int(n) - return buf, true -} - -func (b *Buffer) Skip(n int) { - b.i += n -} - -func (b *Buffer) Reset() { - b.b = b.b[:0] - b.i = 0 -} - -// reclaim shifts used buffer space to the beginning of the -// underlying slice. 
-func (b *Buffer) Reclaim() { - l := b.Len() - copy(b.b[:l], b.b[b.i:]) - b.b = b.b[:l] - b.i = 0 -} - -func (b *Buffer) readCheck(n int64) bool { - return int64(b.i)+n > int64(len(b.b)) -} - -func (b *Buffer) ReadByte() (byte, error) { - if b.readCheck(1) { - return 0, io.EOF - } - - byte_ := b.b[b.i] - b.i++ - return byte_, nil -} - -func (b *Buffer) PeekByte() (byte, error) { - if b.readCheck(1) { - return 0, io.EOF - } - - return b.b[b.i], nil -} - -func (b *Buffer) ReadUint16() (uint16, error) { - if b.readCheck(2) { - return 0, io.EOF - } - - n := binary.BigEndian.Uint16(b.b[b.i:]) - b.i += 2 - return n, nil -} - -func (b *Buffer) ReadUint32() (uint32, error) { - if b.readCheck(4) { - return 0, io.EOF - } - - n := binary.BigEndian.Uint32(b.b[b.i:]) - b.i += 4 - return n, nil -} - -func (b *Buffer) ReadUint64() (uint64, error) { - if b.readCheck(8) { - return 0, io.EOF - } - - n := binary.BigEndian.Uint64(b.b[b.i : b.i+8]) - b.i += 8 - return n, nil -} - -func (b *Buffer) ReadFromOnce(r io.Reader) error { - const minRead = 512 - - l := len(b.b) - if cap(b.b)-l < minRead { - total := l * 2 - if total == 0 { - total = minRead - } - new := make([]byte, l, total) - copy(new, b.b) - b.b = new - } - - n, err := r.Read(b.b[l:cap(b.b)]) - b.b = b.b[:l+n] - return err -} - -func (b *Buffer) Append(p []byte) { - b.b = append(b.b, p...) -} - -func (b *Buffer) AppendByte(bb byte) { - b.b = append(b.b, bb) -} - -func (b *Buffer) AppendString(s string) { - b.b = append(b.b, s...) 
-} - -func (b *Buffer) Len() int { - return len(b.b) - b.i -} - -func (b *Buffer) Size() int { - return b.i -} - -func (b *Buffer) Bytes() []byte { - return b.b[b.i:] -} - -func (b *Buffer) Detach() []byte { - temp := b.b - b.b = nil - b.i = 0 - return temp -} - -func (b *Buffer) AppendUint16(n uint16) { - b.b = append(b.b, - byte(n>>8), - byte(n), - ) -} - -func (b *Buffer) AppendUint32(n uint32) { - b.b = append(b.b, - byte(n>>24), - byte(n>>16), - byte(n>>8), - byte(n), - ) -} - -func (b *Buffer) AppendUint64(n uint64) { - b.b = append(b.b, - byte(n>>56), - byte(n>>48), - byte(n>>40), - byte(n>>32), - byte(n>>24), - byte(n>>16), - byte(n>>8), - byte(n), - ) -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug/debug.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug/debug.go deleted file mode 100644 index 3e6821e1f723..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug/debug.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -//go:build !debug -// +build !debug - -package debug - -// dummy functions used when debugging is not enabled - -// Log writes the formatted string to stderr. -// Level indicates the verbosity of the messages to log. -// The greater the value, the more verbose messages will be logged. -func Log(_ int, _ string, _ ...any) {} - -// Assert panics if the specified condition is false. -func Assert(bool) {} - -// Assert panics with the provided message if the specified condition is false. 
-func Assertf(bool, string, ...any) {} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug/debug_debug.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug/debug_debug.go deleted file mode 100644 index 96d53768a5c9..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug/debug_debug.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -//go:build debug -// +build debug - -package debug - -import ( - "fmt" - "log" - "os" - "strconv" -) - -var ( - debugLevel = 1 - logger = log.New(os.Stderr, "", log.Lmicroseconds) -) - -func init() { - level, err := strconv.Atoi(os.Getenv("DEBUG_LEVEL")) - if err != nil { - return - } - - debugLevel = level -} - -// Log writes the formatted string to stderr. -// Level indicates the verbosity of the messages to log. -// The greater the value, the more verbose messages will be logged. -func Log(level int, format string, v ...any) { - if level <= debugLevel { - logger.Printf(format, v...) - } -} - -// Assert panics if the specified condition is false. -func Assert(condition bool) { - if !condition { - panic("assertion failed!") - } -} - -// Assert panics with the provided message if the specified condition is false. 
-func Assertf(condition bool, msg string, v ...any) { - if !condition { - panic(fmt.Sprintf(msg, v...)) - } -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/decode.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/decode.go deleted file mode 100644 index 1de2be5f70a9..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/decode.go +++ /dev/null @@ -1,1150 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package encoding - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "reflect" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer" -) - -// unmarshaler is fulfilled by types that can unmarshal -// themselves from AMQP data. -type unmarshaler interface { - Unmarshal(r *buffer.Buffer) error -} - -// unmarshal decodes AMQP encoded data into i. -// -// The decoding method is based on the type of i. -// -// If i implements unmarshaler, i.Unmarshal() will be called. -// -// Pointers to primitive types will be decoded via the appropriate read[Type] function. -// -// If i is a pointer to a pointer (**Type), it will be dereferenced and a new instance -// of (*Type) is allocated via reflection. -// -// Common map types (map[string]string, map[Symbol]any, and -// map[any]any), will be decoded via conversion to the mapStringAny, -// mapSymbolAny, and mapAnyAny types. 
-func Unmarshal(r *buffer.Buffer, i any) error { - if tryReadNull(r) { - return nil - } - - switch t := i.(type) { - case *int: - val, err := readInt(r) - if err != nil { - return err - } - *t = val - case *int8: - val, err := readSbyte(r) - if err != nil { - return err - } - *t = val - case *int16: - val, err := readShort(r) - if err != nil { - return err - } - *t = val - case *int32: - val, err := readInt32(r) - if err != nil { - return err - } - *t = val - case *int64: - val, err := readLong(r) - if err != nil { - return err - } - *t = val - case *uint64: - val, err := readUlong(r) - if err != nil { - return err - } - *t = val - case *uint32: - val, err := readUint32(r) - if err != nil { - return err - } - *t = val - case **uint32: // fastpath for uint32 pointer fields - val, err := readUint32(r) - if err != nil { - return err - } - *t = &val - case *uint16: - val, err := readUshort(r) - if err != nil { - return err - } - *t = val - case *uint8: - val, err := ReadUbyte(r) - if err != nil { - return err - } - *t = val - case *float32: - val, err := readFloat(r) - if err != nil { - return err - } - *t = val - case *float64: - val, err := readDouble(r) - if err != nil { - return err - } - *t = val - case *string: - val, err := ReadString(r) - if err != nil { - return err - } - *t = val - case *Symbol: - s, err := ReadString(r) - if err != nil { - return err - } - *t = Symbol(s) - case *[]byte: - val, err := readBinary(r) - if err != nil { - return err - } - *t = val - case *bool: - b, err := readBool(r) - if err != nil { - return err - } - *t = b - case *time.Time: - ts, err := readTimestamp(r) - if err != nil { - return err - } - *t = ts - case *[]int8: - return (*arrayInt8)(t).Unmarshal(r) - case *[]uint16: - return (*arrayUint16)(t).Unmarshal(r) - case *[]int16: - return (*arrayInt16)(t).Unmarshal(r) - case *[]uint32: - return (*arrayUint32)(t).Unmarshal(r) - case *[]int32: - return (*arrayInt32)(t).Unmarshal(r) - case *[]uint64: - return 
(*arrayUint64)(t).Unmarshal(r) - case *[]int64: - return (*arrayInt64)(t).Unmarshal(r) - case *[]float32: - return (*arrayFloat)(t).Unmarshal(r) - case *[]float64: - return (*arrayDouble)(t).Unmarshal(r) - case *[]bool: - return (*arrayBool)(t).Unmarshal(r) - case *[]string: - return (*arrayString)(t).Unmarshal(r) - case *[]Symbol: - return (*arraySymbol)(t).Unmarshal(r) - case *[][]byte: - return (*arrayBinary)(t).Unmarshal(r) - case *[]time.Time: - return (*arrayTimestamp)(t).Unmarshal(r) - case *[]UUID: - return (*arrayUUID)(t).Unmarshal(r) - case *[]any: - return (*list)(t).Unmarshal(r) - case *map[any]any: - return (*mapAnyAny)(t).Unmarshal(r) - case *map[string]any: - return (*mapStringAny)(t).Unmarshal(r) - case *map[Symbol]any: - return (*mapSymbolAny)(t).Unmarshal(r) - case *DeliveryState: - type_, _, err := PeekMessageType(r.Bytes()) - if err != nil { - return err - } - - switch AMQPType(type_) { - case TypeCodeStateAccepted: - *t = new(StateAccepted) - case TypeCodeStateModified: - *t = new(StateModified) - case TypeCodeStateReceived: - *t = new(StateReceived) - case TypeCodeStateRejected: - *t = new(StateRejected) - case TypeCodeStateReleased: - *t = new(StateReleased) - default: - return fmt.Errorf("unexpected type %d for deliveryState", type_) - } - return Unmarshal(r, *t) - - case *any: - v, err := ReadAny(r) - if err != nil { - return err - } - *t = v - - case unmarshaler: - return t.Unmarshal(r) - default: - // handle **T - v := reflect.Indirect(reflect.ValueOf(i)) - - // can't unmarshal into a non-pointer - if v.Kind() != reflect.Ptr { - return fmt.Errorf("unable to unmarshal %T", i) - } - - // if nil pointer, allocate a new value to - // unmarshal into - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - return Unmarshal(r, v.Interface()) - } - return nil -} - -// unmarshalComposite is a helper for use in a composite's unmarshal() function. -// -// The composite from r will be unmarshaled into zero or more fields. 
An error -// will be returned if typ does not match the decoded type. -func UnmarshalComposite(r *buffer.Buffer, type_ AMQPType, fields ...UnmarshalField) error { - cType, numFields, err := readCompositeHeader(r) - if err != nil { - return err - } - - // check type matches expectation - if cType != type_ { - return fmt.Errorf("invalid header %#0x for %#0x", cType, type_) - } - - // Validate the field count is less than or equal to the number of fields - // provided. Fields may be omitted by the sender if they are not set. - if numFields > int64(len(fields)) { - return fmt.Errorf("invalid field count %d for %#0x", numFields, type_) - } - - for i, field := range fields[:numFields] { - // If the field is null and handleNull is set, call it. - if tryReadNull(r) { - if field.HandleNull != nil { - err = field.HandleNull() - if err != nil { - return err - } - } - continue - } - - // Unmarshal each of the received fields. - err = Unmarshal(r, field.Field) - if err != nil { - return fmt.Errorf("unmarshaling field %d: %v", i, err) - } - } - - // check and call handleNull for the remaining fields - for _, field := range fields[numFields:] { - if field.HandleNull != nil { - err = field.HandleNull() - if err != nil { - return err - } - } - } - - return nil -} - -// unmarshalField is a struct that contains a field to be unmarshaled into. -// -// An optional nullHandler can be set. If the composite field being unmarshaled -// is null and handleNull is not nil, nullHandler will be called. -type UnmarshalField struct { - Field any - HandleNull NullHandler -} - -// nullHandler is a function to be called when a composite's field -// is null. -type NullHandler func() error - -func readType(r *buffer.Buffer) (AMQPType, error) { - n, err := r.ReadByte() - return AMQPType(n), err -} - -func peekType(r *buffer.Buffer) (AMQPType, error) { - n, err := r.PeekByte() - return AMQPType(n), err -} - -// readCompositeHeader reads and consumes the composite header from r. 
-func readCompositeHeader(r *buffer.Buffer) (_ AMQPType, fields int64, _ error) { - type_, err := readType(r) - if err != nil { - return 0, 0, err - } - - // compsites always start with 0x0 - if type_ != 0 { - return 0, 0, fmt.Errorf("invalid composite header %#02x", type_) - } - - // next, the composite type is encoded as an AMQP uint8 - v, err := readUlong(r) - if err != nil { - return 0, 0, err - } - - // fields are represented as a list - fields, err = readListHeader(r) - - return AMQPType(v), fields, err -} - -func readListHeader(r *buffer.Buffer) (length int64, _ error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - listLength := r.Len() - - switch type_ { - case TypeCodeList0: - return 0, nil - case TypeCodeList8: - buf, ok := r.Next(2) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[1] - - size := int(buf[0]) - if size > listLength-1 { - return 0, errors.New("invalid length") - } - length = int64(buf[1]) - case TypeCodeList32: - buf, ok := r.Next(8) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[7] - - size := int(binary.BigEndian.Uint32(buf[:4])) - if size > listLength-4 { - return 0, errors.New("invalid length") - } - length = int64(binary.BigEndian.Uint32(buf[4:8])) - default: - return 0, fmt.Errorf("type code %#02x is not a recognized list type", type_) - } - - return length, nil -} - -func readArrayHeader(r *buffer.Buffer) (length int64, _ error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - arrayLength := r.Len() - - switch type_ { - case TypeCodeArray8: - buf, ok := r.Next(2) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[1] - - size := int(buf[0]) - if size > arrayLength-1 { - return 0, errors.New("invalid length") - } - length = int64(buf[1]) - case TypeCodeArray32: - buf, ok := r.Next(8) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[7] - - size := binary.BigEndian.Uint32(buf[:4]) - if int(size) > arrayLength-4 { - return 
0, fmt.Errorf("invalid length for type %02x", type_) - } - length = int64(binary.BigEndian.Uint32(buf[4:8])) - default: - return 0, fmt.Errorf("type code %#02x is not a recognized array type", type_) - } - return length, nil -} - -func ReadString(r *buffer.Buffer) (string, error) { - type_, err := readType(r) - if err != nil { - return "", err - } - - var length int64 - switch type_ { - case TypeCodeStr8, TypeCodeSym8: - n, err := r.ReadByte() - if err != nil { - return "", err - } - length = int64(n) - case TypeCodeStr32, TypeCodeSym32: - buf, ok := r.Next(4) - if !ok { - return "", fmt.Errorf("invalid length for type %#02x", type_) - } - length = int64(binary.BigEndian.Uint32(buf)) - default: - return "", fmt.Errorf("type code %#02x is not a recognized string type", type_) - } - - buf, ok := r.Next(length) - if !ok { - return "", errors.New("invalid length") - } - return string(buf), nil -} - -func readBinary(r *buffer.Buffer) ([]byte, error) { - type_, err := readType(r) - if err != nil { - return nil, err - } - - var length int64 - switch type_ { - case TypeCodeVbin8: - n, err := r.ReadByte() - if err != nil { - return nil, err - } - length = int64(n) - case TypeCodeVbin32: - buf, ok := r.Next(4) - if !ok { - return nil, fmt.Errorf("invalid length for type %#02x", type_) - } - length = int64(binary.BigEndian.Uint32(buf)) - default: - return nil, fmt.Errorf("type code %#02x is not a recognized binary type", type_) - } - - if length == 0 { - // An empty value and a nil value are distinct, - // ensure that the returned value is not nil in this case. 
- return make([]byte, 0), nil - } - - buf, ok := r.Next(length) - if !ok { - return nil, errors.New("invalid length") - } - return append([]byte(nil), buf...), nil -} - -func ReadAny(r *buffer.Buffer) (any, error) { - if tryReadNull(r) { - return nil, nil - } - - type_, err := peekType(r) - if err != nil { - return nil, errors.New("invalid length") - } - - switch type_ { - // composite - case 0x0: - return readComposite(r) - - // bool - case TypeCodeBool, TypeCodeBoolTrue, TypeCodeBoolFalse: - return readBool(r) - - // uint - case TypeCodeUbyte: - return ReadUbyte(r) - case TypeCodeUshort: - return readUshort(r) - case TypeCodeUint, - TypeCodeSmallUint, - TypeCodeUint0: - return readUint32(r) - case TypeCodeUlong, - TypeCodeSmallUlong, - TypeCodeUlong0: - return readUlong(r) - - // int - case TypeCodeByte: - return readSbyte(r) - case TypeCodeShort: - return readShort(r) - case TypeCodeInt, - TypeCodeSmallint: - return readInt32(r) - case TypeCodeLong, - TypeCodeSmalllong: - return readLong(r) - - // floating point - case TypeCodeFloat: - return readFloat(r) - case TypeCodeDouble: - return readDouble(r) - - // binary - case TypeCodeVbin8, TypeCodeVbin32: - return readBinary(r) - - // strings - case TypeCodeStr8, TypeCodeStr32: - return ReadString(r) - case TypeCodeSym8, TypeCodeSym32: - // symbols currently decoded as string to avoid - // exposing symbol type in message, this may need - // to change if users need to distinguish strings - // from symbols - return ReadString(r) - - // timestamp - case TypeCodeTimestamp: - return readTimestamp(r) - - // UUID - case TypeCodeUUID: - return readUUID(r) - - // arrays - case TypeCodeArray8, TypeCodeArray32: - return readAnyArray(r) - - // lists - case TypeCodeList0, TypeCodeList8, TypeCodeList32: - return readAnyList(r) - - // maps - case TypeCodeMap8: - return readAnyMap(r) - case TypeCodeMap32: - return readAnyMap(r) - - // TODO: implement - case TypeCodeDecimal32: - return nil, errors.New("decimal32 not implemented") - 
case TypeCodeDecimal64: - return nil, errors.New("decimal64 not implemented") - case TypeCodeDecimal128: - return nil, errors.New("decimal128 not implemented") - case TypeCodeChar: - return nil, errors.New("char not implemented") - default: - return nil, fmt.Errorf("unknown type %#02x", type_) - } -} - -func readAnyMap(r *buffer.Buffer) (any, error) { - var m map[any]any - err := (*mapAnyAny)(&m).Unmarshal(r) - if err != nil { - return nil, err - } - - if len(m) == 0 { - return m, nil - } - - stringKeys := true -Loop: - for key := range m { - switch key.(type) { - case string: - case Symbol: - default: - stringKeys = false - break Loop - } - } - - if stringKeys { - mm := make(map[string]any, len(m)) - for key, value := range m { - switch key := key.(type) { - case string: - mm[key] = value - case Symbol: - mm[string(key)] = value - } - } - return mm, nil - } - - return m, nil -} - -func readAnyList(r *buffer.Buffer) (any, error) { - var a []any - err := (*list)(&a).Unmarshal(r) - return a, err -} - -func readAnyArray(r *buffer.Buffer) (any, error) { - // get the array type - buf := r.Bytes() - if len(buf) < 1 { - return nil, errors.New("invalid length") - } - - var typeIdx int - switch AMQPType(buf[0]) { - case TypeCodeArray8: - typeIdx = 3 - case TypeCodeArray32: - typeIdx = 9 - default: - return nil, fmt.Errorf("invalid array type %02x", buf[0]) - } - if len(buf) < typeIdx+1 { - return nil, errors.New("invalid length") - } - - switch AMQPType(buf[typeIdx]) { - case TypeCodeByte: - var a []int8 - err := (*arrayInt8)(&a).Unmarshal(r) - return a, err - case TypeCodeUbyte: - var a ArrayUByte - err := a.Unmarshal(r) - return a, err - case TypeCodeUshort: - var a []uint16 - err := (*arrayUint16)(&a).Unmarshal(r) - return a, err - case TypeCodeShort: - var a []int16 - err := (*arrayInt16)(&a).Unmarshal(r) - return a, err - case TypeCodeUint0, TypeCodeSmallUint, TypeCodeUint: - var a []uint32 - err := (*arrayUint32)(&a).Unmarshal(r) - return a, err - case 
TypeCodeSmallint, TypeCodeInt: - var a []int32 - err := (*arrayInt32)(&a).Unmarshal(r) - return a, err - case TypeCodeUlong0, TypeCodeSmallUlong, TypeCodeUlong: - var a []uint64 - err := (*arrayUint64)(&a).Unmarshal(r) - return a, err - case TypeCodeSmalllong, TypeCodeLong: - var a []int64 - err := (*arrayInt64)(&a).Unmarshal(r) - return a, err - case TypeCodeFloat: - var a []float32 - err := (*arrayFloat)(&a).Unmarshal(r) - return a, err - case TypeCodeDouble: - var a []float64 - err := (*arrayDouble)(&a).Unmarshal(r) - return a, err - case TypeCodeBool, TypeCodeBoolTrue, TypeCodeBoolFalse: - var a []bool - err := (*arrayBool)(&a).Unmarshal(r) - return a, err - case TypeCodeStr8, TypeCodeStr32: - var a []string - err := (*arrayString)(&a).Unmarshal(r) - return a, err - case TypeCodeSym8, TypeCodeSym32: - var a []Symbol - err := (*arraySymbol)(&a).Unmarshal(r) - return a, err - case TypeCodeVbin8, TypeCodeVbin32: - var a [][]byte - err := (*arrayBinary)(&a).Unmarshal(r) - return a, err - case TypeCodeTimestamp: - var a []time.Time - err := (*arrayTimestamp)(&a).Unmarshal(r) - return a, err - case TypeCodeUUID: - var a []UUID - err := (*arrayUUID)(&a).Unmarshal(r) - return a, err - default: - return nil, fmt.Errorf("array decoding not implemented for %#02x", buf[typeIdx]) - } -} - -func readComposite(r *buffer.Buffer) (any, error) { - buf := r.Bytes() - - if len(buf) < 2 { - return nil, errors.New("invalid length for composite") - } - - // compsites start with 0x0 - if AMQPType(buf[0]) != 0x0 { - return nil, fmt.Errorf("invalid composite header %#02x", buf[0]) - } - - var compositeType uint64 - switch AMQPType(buf[1]) { - case TypeCodeSmallUlong: - if len(buf) < 3 { - return nil, errors.New("invalid length for smallulong") - } - compositeType = uint64(buf[2]) - case TypeCodeUlong: - if len(buf) < 10 { - return nil, errors.New("invalid length for ulong") - } - compositeType = binary.BigEndian.Uint64(buf[2:]) - } - - if compositeType > math.MaxUint8 { - // try as 
described type - var dt DescribedType - err := dt.Unmarshal(r) - return dt, err - } - - switch AMQPType(compositeType) { - // Error - case TypeCodeError: - t := new(Error) - err := t.Unmarshal(r) - return t, err - - // Lifetime Policies - case TypeCodeDeleteOnClose: - t := DeleteOnClose - err := t.Unmarshal(r) - return t, err - case TypeCodeDeleteOnNoMessages: - t := DeleteOnNoMessages - err := t.Unmarshal(r) - return t, err - case TypeCodeDeleteOnNoLinks: - t := DeleteOnNoLinks - err := t.Unmarshal(r) - return t, err - case TypeCodeDeleteOnNoLinksOrMessages: - t := DeleteOnNoLinksOrMessages - err := t.Unmarshal(r) - return t, err - - // Delivery States - case TypeCodeStateAccepted: - t := new(StateAccepted) - err := t.Unmarshal(r) - return t, err - case TypeCodeStateModified: - t := new(StateModified) - err := t.Unmarshal(r) - return t, err - case TypeCodeStateReceived: - t := new(StateReceived) - err := t.Unmarshal(r) - return t, err - case TypeCodeStateRejected: - t := new(StateRejected) - err := t.Unmarshal(r) - return t, err - case TypeCodeStateReleased: - t := new(StateReleased) - err := t.Unmarshal(r) - return t, err - - case TypeCodeOpen, - TypeCodeBegin, - TypeCodeAttach, - TypeCodeFlow, - TypeCodeTransfer, - TypeCodeDisposition, - TypeCodeDetach, - TypeCodeEnd, - TypeCodeClose, - TypeCodeSource, - TypeCodeTarget, - TypeCodeMessageHeader, - TypeCodeDeliveryAnnotations, - TypeCodeMessageAnnotations, - TypeCodeMessageProperties, - TypeCodeApplicationProperties, - TypeCodeApplicationData, - TypeCodeAMQPSequence, - TypeCodeAMQPValue, - TypeCodeFooter, - TypeCodeSASLMechanism, - TypeCodeSASLInit, - TypeCodeSASLChallenge, - TypeCodeSASLResponse, - TypeCodeSASLOutcome: - return nil, fmt.Errorf("readComposite unmarshal not implemented for %#02x", compositeType) - - default: - // try as described type - var dt DescribedType - err := dt.Unmarshal(r) - return dt, err - } -} - -func readTimestamp(r *buffer.Buffer) (time.Time, error) { - type_, err := readType(r) - if 
err != nil { - return time.Time{}, err - } - - if type_ != TypeCodeTimestamp { - return time.Time{}, fmt.Errorf("invalid type for timestamp %02x", type_) - } - - n, err := r.ReadUint64() - ms := int64(n) - return time.Unix(ms/1000, (ms%1000)*1000000).UTC(), err -} - -func readInt(r *buffer.Buffer) (int, error) { - type_, err := peekType(r) - if err != nil { - return 0, err - } - - switch type_ { - // Unsigned - case TypeCodeUbyte: - n, err := ReadUbyte(r) - return int(n), err - case TypeCodeUshort: - n, err := readUshort(r) - return int(n), err - case TypeCodeUint0, TypeCodeSmallUint, TypeCodeUint: - n, err := readUint32(r) - return int(n), err - case TypeCodeUlong0, TypeCodeSmallUlong, TypeCodeUlong: - n, err := readUlong(r) - return int(n), err - - // Signed - case TypeCodeByte: - n, err := readSbyte(r) - return int(n), err - case TypeCodeShort: - n, err := readShort(r) - return int(n), err - case TypeCodeSmallint, TypeCodeInt: - n, err := readInt32(r) - return int(n), err - case TypeCodeSmalllong, TypeCodeLong: - n, err := readLong(r) - return int(n), err - default: - return 0, fmt.Errorf("type code %#02x is not a recognized number type", type_) - } -} - -func readLong(r *buffer.Buffer) (int64, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeSmalllong: - n, err := r.ReadByte() - return int64(int8(n)), err - case TypeCodeLong: - n, err := r.ReadUint64() - return int64(n), err - default: - return 0, fmt.Errorf("invalid type for uint32 %02x", type_) - } -} - -func readInt32(r *buffer.Buffer) (int32, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeSmallint: - n, err := r.ReadByte() - return int32(int8(n)), err - case TypeCodeInt: - n, err := r.ReadUint32() - return int32(n), err - default: - return 0, fmt.Errorf("invalid type for int32 %02x", type_) - } -} - -func readShort(r *buffer.Buffer) (int16, error) { - type_, err := readType(r) - if err 
!= nil { - return 0, err - } - - if type_ != TypeCodeShort { - return 0, fmt.Errorf("invalid type for short %02x", type_) - } - - n, err := r.ReadUint16() - return int16(n), err -} - -func readSbyte(r *buffer.Buffer) (int8, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeByte { - return 0, fmt.Errorf("invalid type for int8 %02x", type_) - } - - n, err := r.ReadByte() - return int8(n), err -} - -func ReadUbyte(r *buffer.Buffer) (uint8, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeUbyte { - return 0, fmt.Errorf("invalid type for ubyte %02x", type_) - } - - return r.ReadByte() -} - -func readUshort(r *buffer.Buffer) (uint16, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeUshort { - return 0, fmt.Errorf("invalid type for ushort %02x", type_) - } - - return r.ReadUint16() -} - -func readUint32(r *buffer.Buffer) (uint32, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeUint0: - return 0, nil - case TypeCodeSmallUint: - n, err := r.ReadByte() - return uint32(n), err - case TypeCodeUint: - return r.ReadUint32() - default: - return 0, fmt.Errorf("invalid type for uint32 %02x", type_) - } -} - -func readUlong(r *buffer.Buffer) (uint64, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeUlong0: - return 0, nil - case TypeCodeSmallUlong: - n, err := r.ReadByte() - return uint64(n), err - case TypeCodeUlong: - return r.ReadUint64() - default: - return 0, fmt.Errorf("invalid type for uint32 %02x", type_) - } -} - -func readFloat(r *buffer.Buffer) (float32, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeFloat { - return 0, fmt.Errorf("invalid type for float32 %02x", type_) - } - - bits, err := r.ReadUint32() - return math.Float32frombits(bits), err -} - -func 
readDouble(r *buffer.Buffer) (float64, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeDouble { - return 0, fmt.Errorf("invalid type for float64 %02x", type_) - } - - bits, err := r.ReadUint64() - return math.Float64frombits(bits), err -} - -func readBool(r *buffer.Buffer) (bool, error) { - type_, err := readType(r) - if err != nil { - return false, err - } - - switch type_ { - case TypeCodeBool: - b, err := r.ReadByte() - return b != 0, err - case TypeCodeBoolTrue: - return true, nil - case TypeCodeBoolFalse: - return false, nil - default: - return false, fmt.Errorf("type code %#02x is not a recognized bool type", type_) - } -} - -func readUint(r *buffer.Buffer) (value uint64, _ error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeUint0, TypeCodeUlong0: - return 0, nil - case TypeCodeUbyte, TypeCodeSmallUint, TypeCodeSmallUlong: - n, err := r.ReadByte() - return uint64(n), err - case TypeCodeUshort: - n, err := r.ReadUint16() - return uint64(n), err - case TypeCodeUint: - n, err := r.ReadUint32() - return uint64(n), err - case TypeCodeUlong: - return r.ReadUint64() - default: - return 0, fmt.Errorf("type code %#02x is not a recognized number type", type_) - } -} - -func readUUID(r *buffer.Buffer) (UUID, error) { - var uuid UUID - - type_, err := readType(r) - if err != nil { - return uuid, err - } - - if type_ != TypeCodeUUID { - return uuid, fmt.Errorf("type code %#00x is not a UUID", type_) - } - - buf, ok := r.Next(16) - if !ok { - return uuid, errors.New("invalid length") - } - copy(uuid[:], buf) - - return uuid, nil -} - -func readMapHeader(r *buffer.Buffer) (count uint32, _ error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - length := r.Len() - - switch type_ { - case TypeCodeMap8: - buf, ok := r.Next(2) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[1] - - size := int(buf[0]) - if size > length-1 { - return 
0, errors.New("invalid length") - } - count = uint32(buf[1]) - case TypeCodeMap32: - buf, ok := r.Next(8) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[7] - - size := int(binary.BigEndian.Uint32(buf[:4])) - if size > length-4 { - return 0, errors.New("invalid length") - } - count = binary.BigEndian.Uint32(buf[4:8]) - default: - return 0, fmt.Errorf("invalid map type %#02x", type_) - } - - if int(count) > r.Len() { - return 0, errors.New("invalid length") - } - return count, nil -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/encode.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/encode.go deleted file mode 100644 index 1103c84f2b26..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/encode.go +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package encoding - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "time" - "unicode/utf8" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer" -) - -type marshaler interface { - Marshal(*buffer.Buffer) error -} - -func Marshal(wr *buffer.Buffer, i any) error { - switch t := i.(type) { - case nil: - wr.AppendByte(byte(TypeCodeNull)) - case bool: - if t { - wr.AppendByte(byte(TypeCodeBoolTrue)) - } else { - wr.AppendByte(byte(TypeCodeBoolFalse)) - } - case *bool: - if *t { - wr.AppendByte(byte(TypeCodeBoolTrue)) - } else { - wr.AppendByte(byte(TypeCodeBoolFalse)) - } - case uint: - writeUint64(wr, uint64(t)) - case *uint: - writeUint64(wr, uint64(*t)) - case uint64: - writeUint64(wr, t) - case *uint64: - writeUint64(wr, *t) - case uint32: - writeUint32(wr, t) - case *uint32: - writeUint32(wr, *t) - case uint16: - wr.AppendByte(byte(TypeCodeUshort)) - wr.AppendUint16(t) - case *uint16: - wr.AppendByte(byte(TypeCodeUshort)) - wr.AppendUint16(*t) - case uint8: - wr.Append([]byte{ - 
byte(TypeCodeUbyte), - t, - }) - case *uint8: - wr.Append([]byte{ - byte(TypeCodeUbyte), - *t, - }) - case int: - writeInt64(wr, int64(t)) - case *int: - writeInt64(wr, int64(*t)) - case int8: - wr.Append([]byte{ - byte(TypeCodeByte), - uint8(t), - }) - case *int8: - wr.Append([]byte{ - byte(TypeCodeByte), - uint8(*t), - }) - case int16: - wr.AppendByte(byte(TypeCodeShort)) - wr.AppendUint16(uint16(t)) - case *int16: - wr.AppendByte(byte(TypeCodeShort)) - wr.AppendUint16(uint16(*t)) - case int32: - writeInt32(wr, t) - case *int32: - writeInt32(wr, *t) - case int64: - writeInt64(wr, t) - case *int64: - writeInt64(wr, *t) - case float32: - writeFloat(wr, t) - case *float32: - writeFloat(wr, *t) - case float64: - writeDouble(wr, t) - case *float64: - writeDouble(wr, *t) - case string: - return writeString(wr, t) - case *string: - return writeString(wr, *t) - case []byte: - return WriteBinary(wr, t) - case *[]byte: - return WriteBinary(wr, *t) - case map[any]any: - return writeMap(wr, t) - case *map[any]any: - return writeMap(wr, *t) - case map[string]any: - return writeMap(wr, t) - case *map[string]any: - return writeMap(wr, *t) - case map[Symbol]any: - return writeMap(wr, t) - case *map[Symbol]any: - return writeMap(wr, *t) - case Unsettled: - return writeMap(wr, t) - case *Unsettled: - return writeMap(wr, *t) - case time.Time: - writeTimestamp(wr, t) - case *time.Time: - writeTimestamp(wr, *t) - case []int8: - return arrayInt8(t).Marshal(wr) - case *[]int8: - return arrayInt8(*t).Marshal(wr) - case []uint16: - return arrayUint16(t).Marshal(wr) - case *[]uint16: - return arrayUint16(*t).Marshal(wr) - case []int16: - return arrayInt16(t).Marshal(wr) - case *[]int16: - return arrayInt16(*t).Marshal(wr) - case []uint32: - return arrayUint32(t).Marshal(wr) - case *[]uint32: - return arrayUint32(*t).Marshal(wr) - case []int32: - return arrayInt32(t).Marshal(wr) - case *[]int32: - return arrayInt32(*t).Marshal(wr) - case []uint64: - return arrayUint64(t).Marshal(wr) - case 
*[]uint64: - return arrayUint64(*t).Marshal(wr) - case []int64: - return arrayInt64(t).Marshal(wr) - case *[]int64: - return arrayInt64(*t).Marshal(wr) - case []float32: - return arrayFloat(t).Marshal(wr) - case *[]float32: - return arrayFloat(*t).Marshal(wr) - case []float64: - return arrayDouble(t).Marshal(wr) - case *[]float64: - return arrayDouble(*t).Marshal(wr) - case []bool: - return arrayBool(t).Marshal(wr) - case *[]bool: - return arrayBool(*t).Marshal(wr) - case []string: - return arrayString(t).Marshal(wr) - case *[]string: - return arrayString(*t).Marshal(wr) - case []Symbol: - return arraySymbol(t).Marshal(wr) - case *[]Symbol: - return arraySymbol(*t).Marshal(wr) - case [][]byte: - return arrayBinary(t).Marshal(wr) - case *[][]byte: - return arrayBinary(*t).Marshal(wr) - case []time.Time: - return arrayTimestamp(t).Marshal(wr) - case *[]time.Time: - return arrayTimestamp(*t).Marshal(wr) - case []UUID: - return arrayUUID(t).Marshal(wr) - case *[]UUID: - return arrayUUID(*t).Marshal(wr) - case []any: - return list(t).Marshal(wr) - case *[]any: - return list(*t).Marshal(wr) - case marshaler: - return t.Marshal(wr) - default: - return fmt.Errorf("marshal not implemented for %T", i) - } - return nil -} - -func writeInt32(wr *buffer.Buffer, n int32) { - if n < 128 && n >= -128 { - wr.Append([]byte{ - byte(TypeCodeSmallint), - byte(n), - }) - return - } - - wr.AppendByte(byte(TypeCodeInt)) - wr.AppendUint32(uint32(n)) -} - -func writeInt64(wr *buffer.Buffer, n int64) { - if n < 128 && n >= -128 { - wr.Append([]byte{ - byte(TypeCodeSmalllong), - byte(n), - }) - return - } - - wr.AppendByte(byte(TypeCodeLong)) - wr.AppendUint64(uint64(n)) -} - -func writeUint32(wr *buffer.Buffer, n uint32) { - if n == 0 { - wr.AppendByte(byte(TypeCodeUint0)) - return - } - - if n < 256 { - wr.Append([]byte{ - byte(TypeCodeSmallUint), - byte(n), - }) - return - } - - wr.AppendByte(byte(TypeCodeUint)) - wr.AppendUint32(n) -} - -func writeUint64(wr *buffer.Buffer, n uint64) { - 
if n == 0 { - wr.AppendByte(byte(TypeCodeUlong0)) - return - } - - if n < 256 { - wr.Append([]byte{ - byte(TypeCodeSmallUlong), - byte(n), - }) - return - } - - wr.AppendByte(byte(TypeCodeUlong)) - wr.AppendUint64(n) -} - -func writeFloat(wr *buffer.Buffer, f float32) { - wr.AppendByte(byte(TypeCodeFloat)) - wr.AppendUint32(math.Float32bits(f)) -} - -func writeDouble(wr *buffer.Buffer, f float64) { - wr.AppendByte(byte(TypeCodeDouble)) - wr.AppendUint64(math.Float64bits(f)) -} - -func writeTimestamp(wr *buffer.Buffer, t time.Time) { - wr.AppendByte(byte(TypeCodeTimestamp)) - ms := t.UnixNano() / int64(time.Millisecond) - wr.AppendUint64(uint64(ms)) -} - -// marshalField is a field to be marshaled -type MarshalField struct { - Value any // value to be marshaled, use pointers to avoid interface conversion overhead - Omit bool // indicates that this field should be omitted (set to null) -} - -// marshalComposite is a helper for us in a composite's marshal() function. -// -// The returned bytes include the composite header and fields. Fields with -// omit set to true will be encoded as null or omitted altogether if there are -// no non-null fields after them. -func MarshalComposite(wr *buffer.Buffer, code AMQPType, fields []MarshalField) error { - // lastSetIdx is the last index to have a non-omitted field. - // start at -1 as it's possible to have no fields in a composite - lastSetIdx := -1 - - // marshal each field into it's index in rawFields, - // null fields are skipped, leaving the index nil. 
- for i, f := range fields { - if f.Omit { - continue - } - lastSetIdx = i - } - - // write header only - if lastSetIdx == -1 { - wr.Append([]byte{ - 0x0, - byte(TypeCodeSmallUlong), - byte(code), - byte(TypeCodeList0), - }) - return nil - } - - // write header - WriteDescriptor(wr, code) - - // write fields - wr.AppendByte(byte(TypeCodeList32)) - - // write temp size, replace later - sizeIdx := wr.Len() - wr.Append([]byte{0, 0, 0, 0}) - preFieldLen := wr.Len() - - // field count - wr.AppendUint32(uint32(lastSetIdx + 1)) - - // write null to each index up to lastSetIdx - for _, f := range fields[:lastSetIdx+1] { - if f.Omit { - wr.AppendByte(byte(TypeCodeNull)) - continue - } - err := Marshal(wr, f.Value) - if err != nil { - return err - } - } - - // fix size - size := uint32(wr.Len() - preFieldLen) - buf := wr.Bytes() - binary.BigEndian.PutUint32(buf[sizeIdx:], size) - - return nil -} - -func WriteDescriptor(wr *buffer.Buffer, code AMQPType) { - wr.Append([]byte{ - 0x0, - byte(TypeCodeSmallUlong), - byte(code), - }) -} - -func writeString(wr *buffer.Buffer, str string) error { - if !utf8.ValidString(str) { - return errors.New("not a valid UTF-8 string") - } - l := len(str) - - switch { - // Str8 - case l < 256: - wr.Append([]byte{ - byte(TypeCodeStr8), - byte(l), - }) - wr.AppendString(str) - return nil - - // Str32 - case uint(l) < math.MaxUint32: - wr.AppendByte(byte(TypeCodeStr32)) - wr.AppendUint32(uint32(l)) - wr.AppendString(str) - return nil - - default: - return errors.New("too long") - } -} - -func WriteBinary(wr *buffer.Buffer, bin []byte) error { - l := len(bin) - - switch { - // List8 - case l < 256: - wr.Append([]byte{ - byte(TypeCodeVbin8), - byte(l), - }) - wr.Append(bin) - return nil - - // List32 - case uint(l) < math.MaxUint32: - wr.AppendByte(byte(TypeCodeVbin32)) - wr.AppendUint32(uint32(l)) - wr.Append(bin) - return nil - - default: - return errors.New("too long") - } -} - -func writeMap(wr *buffer.Buffer, m any) error { - startIdx := wr.Len() 
- wr.Append([]byte{ - byte(TypeCodeMap32), // type - 0, 0, 0, 0, // size placeholder - 0, 0, 0, 0, // length placeholder - }) - - var pairs int - switch m := m.(type) { - case map[any]any: - pairs = len(m) * 2 - for key, val := range m { - err := Marshal(wr, key) - if err != nil { - return err - } - err = Marshal(wr, val) - if err != nil { - return err - } - } - case map[string]any: - pairs = len(m) * 2 - for key, val := range m { - err := writeString(wr, key) - if err != nil { - return err - } - err = Marshal(wr, val) - if err != nil { - return err - } - } - case map[Symbol]any: - pairs = len(m) * 2 - for key, val := range m { - err := key.Marshal(wr) - if err != nil { - return err - } - err = Marshal(wr, val) - if err != nil { - return err - } - } - case Unsettled: - pairs = len(m) * 2 - for key, val := range m { - err := writeString(wr, key) - if err != nil { - return err - } - err = Marshal(wr, val) - if err != nil { - return err - } - } - case Filter: - pairs = len(m) * 2 - for key, val := range m { - err := key.Marshal(wr) - if err != nil { - return err - } - err = val.Marshal(wr) - if err != nil { - return err - } - } - case Annotations: - pairs = len(m) * 2 - for key, val := range m { - switch key := key.(type) { - case string: - err := Symbol(key).Marshal(wr) - if err != nil { - return err - } - case Symbol: - err := key.Marshal(wr) - if err != nil { - return err - } - case int64: - writeInt64(wr, key) - case int: - writeInt64(wr, int64(key)) - default: - return fmt.Errorf("unsupported Annotations key type %T", key) - } - - err := Marshal(wr, val) - if err != nil { - return err - } - } - default: - return fmt.Errorf("unsupported map type %T", m) - } - - if uint(pairs) > math.MaxUint32-4 { - return errors.New("map contains too many elements") - } - - // overwrite placeholder size and length - bytes := wr.Bytes()[startIdx+1 : startIdx+9] - _ = bytes[7] // bounds check hint - - length := wr.Len() - startIdx - 1 - 4 // -1 for type, -4 for length - 
binary.BigEndian.PutUint32(bytes[:4], uint32(length)) - binary.BigEndian.PutUint32(bytes[4:8], uint32(pairs)) - - return nil -} - -// type length sizes -const ( - array8TLSize = 2 - array32TLSize = 5 -) - -func writeArrayHeader(wr *buffer.Buffer, length, typeSize int, type_ AMQPType) { - size := length * typeSize - - // array type - if size+array8TLSize <= math.MaxUint8 { - wr.Append([]byte{ - byte(TypeCodeArray8), // type - byte(size + array8TLSize), // size - byte(length), // length - byte(type_), // element type - }) - } else { - wr.AppendByte(byte(TypeCodeArray32)) //type - wr.AppendUint32(uint32(size + array32TLSize)) // size - wr.AppendUint32(uint32(length)) // length - wr.AppendByte(byte(type_)) // element type - } -} - -func writeVariableArrayHeader(wr *buffer.Buffer, length, elementsSizeTotal int, type_ AMQPType) { - // 0xA_ == 1, 0xB_ == 4 - // http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-types-v1.0-os.html#doc-idp82960 - elementTypeSize := 1 - if type_&0xf0 == 0xb0 { - elementTypeSize = 4 - } - - size := elementsSizeTotal + (length * elementTypeSize) // size excluding array length - if size+array8TLSize <= math.MaxUint8 { - wr.Append([]byte{ - byte(TypeCodeArray8), // type - byte(size + array8TLSize), // size - byte(length), // length - byte(type_), // element type - }) - } else { - wr.AppendByte(byte(TypeCodeArray32)) // type - wr.AppendUint32(uint32(size + array32TLSize)) // size - wr.AppendUint32(uint32(length)) // length - wr.AppendByte(byte(type_)) // element type - } -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/types.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/types.go deleted file mode 100644 index 5196d49b4d4c..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding/types.go +++ /dev/null @@ -1,2155 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package encoding - -import ( - "encoding/binary" - 
"encoding/hex" - "errors" - "fmt" - "math" - "reflect" - "time" - "unicode/utf8" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer" -) - -type AMQPType uint8 - -// Type codes -const ( - TypeCodeNull AMQPType = 0x40 - - // Bool - TypeCodeBool AMQPType = 0x56 // boolean with the octet 0x00 being false and octet 0x01 being true - TypeCodeBoolTrue AMQPType = 0x41 - TypeCodeBoolFalse AMQPType = 0x42 - - // Unsigned - TypeCodeUbyte AMQPType = 0x50 // 8-bit unsigned integer (1) - TypeCodeUshort AMQPType = 0x60 // 16-bit unsigned integer in network byte order (2) - TypeCodeUint AMQPType = 0x70 // 32-bit unsigned integer in network byte order (4) - TypeCodeSmallUint AMQPType = 0x52 // unsigned integer value in the range 0 to 255 inclusive (1) - TypeCodeUint0 AMQPType = 0x43 // the uint value 0 (0) - TypeCodeUlong AMQPType = 0x80 // 64-bit unsigned integer in network byte order (8) - TypeCodeSmallUlong AMQPType = 0x53 // unsigned long value in the range 0 to 255 inclusive (1) - TypeCodeUlong0 AMQPType = 0x44 // the ulong value 0 (0) - - // Signed - TypeCodeByte AMQPType = 0x51 // 8-bit two's-complement integer (1) - TypeCodeShort AMQPType = 0x61 // 16-bit two's-complement integer in network byte order (2) - TypeCodeInt AMQPType = 0x71 // 32-bit two's-complement integer in network byte order (4) - TypeCodeSmallint AMQPType = 0x54 // 8-bit two's-complement integer (1) - TypeCodeLong AMQPType = 0x81 // 64-bit two's-complement integer in network byte order (8) - TypeCodeSmalllong AMQPType = 0x55 // 8-bit two's-complement integer - - // Decimal - TypeCodeFloat AMQPType = 0x72 // IEEE 754-2008 binary32 (4) - TypeCodeDouble AMQPType = 0x82 // IEEE 754-2008 binary64 (8) - TypeCodeDecimal32 AMQPType = 0x74 // IEEE 754-2008 decimal32 using the Binary Integer Decimal encoding (4) - TypeCodeDecimal64 AMQPType = 0x84 // IEEE 754-2008 decimal64 using the Binary Integer Decimal encoding (8) - TypeCodeDecimal128 AMQPType = 0x94 // IEEE 
754-2008 decimal128 using the Binary Integer Decimal encoding (16) - - // Other - TypeCodeChar AMQPType = 0x73 // a UTF-32BE encoded Unicode character (4) - TypeCodeTimestamp AMQPType = 0x83 // 64-bit two's-complement integer representing milliseconds since the unix epoch - TypeCodeUUID AMQPType = 0x98 // UUID as defined in section 4.1.2 of RFC-4122 - - // Variable Length - TypeCodeVbin8 AMQPType = 0xa0 // up to 2^8 - 1 octets of binary data (1 + variable) - TypeCodeVbin32 AMQPType = 0xb0 // up to 2^32 - 1 octets of binary data (4 + variable) - TypeCodeStr8 AMQPType = 0xa1 // up to 2^8 - 1 octets worth of UTF-8 Unicode (with no byte order mark) (1 + variable) - TypeCodeStr32 AMQPType = 0xb1 // up to 2^32 - 1 octets worth of UTF-8 Unicode (with no byte order mark) (4 +variable) - TypeCodeSym8 AMQPType = 0xa3 // up to 2^8 - 1 seven bit ASCII characters representing a symbolic value (1 + variable) - TypeCodeSym32 AMQPType = 0xb3 // up to 2^32 - 1 seven bit ASCII characters representing a symbolic value (4 + variable) - - // Compound - TypeCodeList0 AMQPType = 0x45 // the empty list (i.e. 
the list with no elements) (0) - TypeCodeList8 AMQPType = 0xc0 // up to 2^8 - 1 list elements with total size less than 2^8 octets (1 + compound) - TypeCodeList32 AMQPType = 0xd0 // up to 2^32 - 1 list elements with total size less than 2^32 octets (4 + compound) - TypeCodeMap8 AMQPType = 0xc1 // up to 2^8 - 1 octets of encoded map data (1 + compound) - TypeCodeMap32 AMQPType = 0xd1 // up to 2^32 - 1 octets of encoded map data (4 + compound) - TypeCodeArray8 AMQPType = 0xe0 // up to 2^8 - 1 array elements with total size less than 2^8 octets (1 + array) - TypeCodeArray32 AMQPType = 0xf0 // up to 2^32 - 1 array elements with total size less than 2^32 octets (4 + array) - - // Composites - TypeCodeOpen AMQPType = 0x10 - TypeCodeBegin AMQPType = 0x11 - TypeCodeAttach AMQPType = 0x12 - TypeCodeFlow AMQPType = 0x13 - TypeCodeTransfer AMQPType = 0x14 - TypeCodeDisposition AMQPType = 0x15 - TypeCodeDetach AMQPType = 0x16 - TypeCodeEnd AMQPType = 0x17 - TypeCodeClose AMQPType = 0x18 - - TypeCodeSource AMQPType = 0x28 - TypeCodeTarget AMQPType = 0x29 - TypeCodeError AMQPType = 0x1d - - TypeCodeMessageHeader AMQPType = 0x70 - TypeCodeDeliveryAnnotations AMQPType = 0x71 - TypeCodeMessageAnnotations AMQPType = 0x72 - TypeCodeMessageProperties AMQPType = 0x73 - TypeCodeApplicationProperties AMQPType = 0x74 - TypeCodeApplicationData AMQPType = 0x75 - TypeCodeAMQPSequence AMQPType = 0x76 - TypeCodeAMQPValue AMQPType = 0x77 - TypeCodeFooter AMQPType = 0x78 - - TypeCodeStateReceived AMQPType = 0x23 - TypeCodeStateAccepted AMQPType = 0x24 - TypeCodeStateRejected AMQPType = 0x25 - TypeCodeStateReleased AMQPType = 0x26 - TypeCodeStateModified AMQPType = 0x27 - - TypeCodeSASLMechanism AMQPType = 0x40 - TypeCodeSASLInit AMQPType = 0x41 - TypeCodeSASLChallenge AMQPType = 0x42 - TypeCodeSASLResponse AMQPType = 0x43 - TypeCodeSASLOutcome AMQPType = 0x44 - - TypeCodeDeleteOnClose AMQPType = 0x2b - TypeCodeDeleteOnNoLinks AMQPType = 0x2c - TypeCodeDeleteOnNoMessages AMQPType = 0x2d - 
TypeCodeDeleteOnNoLinksOrMessages AMQPType = 0x2e -) - -// Durability Policies -const ( - // No terminus state is retained durably. - DurabilityNone Durability = 0 - - // Only the existence and configuration of the terminus is - // retained durably. - DurabilityConfiguration Durability = 1 - - // In addition to the existence and configuration of the - // terminus, the unsettled state for durable messages is - // retained durably. - DurabilityUnsettledState Durability = 2 -) - -// Durability specifies the durability of a link. -type Durability uint32 - -func (d *Durability) String() string { - if d == nil { - return "" - } - - switch *d { - case DurabilityNone: - return "none" - case DurabilityConfiguration: - return "configuration" - case DurabilityUnsettledState: - return "unsettled-state" - default: - return fmt.Sprintf("unknown durability %d", *d) - } -} - -func (d Durability) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, uint32(d)) -} - -func (d *Durability) Unmarshal(r *buffer.Buffer) error { - return Unmarshal(r, (*uint32)(d)) -} - -// Expiry Policies -const ( - // The expiry timer starts when terminus is detached. - ExpiryLinkDetach ExpiryPolicy = "link-detach" - - // The expiry timer starts when the most recently - // associated session is ended. - ExpirySessionEnd ExpiryPolicy = "session-end" - - // The expiry timer starts when most recently associated - // connection is closed. - ExpiryConnectionClose ExpiryPolicy = "connection-close" - - // The terminus never expires. - ExpiryNever ExpiryPolicy = "never" -) - -// ExpiryPolicy specifies when the expiry timer of a terminus -// starts counting down from the timeout value. -// -// If the link is subsequently re-attached before the terminus is expired, -// then the count down is aborted. If the conditions for the -// terminus-expiry-policy are subsequently re-met, the expiry timer restarts -// from its originally configured timeout value. 
-type ExpiryPolicy Symbol - -func ValidateExpiryPolicy(e ExpiryPolicy) error { - switch e { - case ExpiryLinkDetach, - ExpirySessionEnd, - ExpiryConnectionClose, - ExpiryNever: - return nil - default: - return fmt.Errorf("unknown expiry-policy %q", e) - } -} - -func (e ExpiryPolicy) Marshal(wr *buffer.Buffer) error { - return Symbol(e).Marshal(wr) -} - -func (e *ExpiryPolicy) Unmarshal(r *buffer.Buffer) error { - err := Unmarshal(r, (*Symbol)(e)) - if err != nil { - return err - } - return ValidateExpiryPolicy(*e) -} - -func (e *ExpiryPolicy) String() string { - if e == nil { - return "" - } - return string(*e) -} - -// Sender Settlement Modes -const ( - // Sender will send all deliveries initially unsettled to the receiver. - SenderSettleModeUnsettled SenderSettleMode = 0 - - // Sender will send all deliveries settled to the receiver. - SenderSettleModeSettled SenderSettleMode = 1 - - // Sender MAY send a mixture of settled and unsettled deliveries to the receiver. - SenderSettleModeMixed SenderSettleMode = 2 -) - -// SenderSettleMode specifies how the sender will settle messages. -type SenderSettleMode uint8 - -func (m SenderSettleMode) Ptr() *SenderSettleMode { - return &m -} - -func (m *SenderSettleMode) String() string { - if m == nil { - return "" - } - - switch *m { - case SenderSettleModeUnsettled: - return "unsettled" - - case SenderSettleModeSettled: - return "settled" - - case SenderSettleModeMixed: - return "mixed" - - default: - return fmt.Sprintf("unknown sender mode %d", uint8(*m)) - } -} - -func (m SenderSettleMode) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, uint8(m)) -} - -func (m *SenderSettleMode) Unmarshal(r *buffer.Buffer) error { - n, err := ReadUbyte(r) - *m = SenderSettleMode(n) - return err -} - -// Receiver Settlement Modes -const ( - // Receiver will spontaneously settle all incoming transfers. 
- ReceiverSettleModeFirst ReceiverSettleMode = 0 - - // Receiver will only settle after sending the disposition to the - // sender and receiving a disposition indicating settlement of - // the delivery from the sender. - ReceiverSettleModeSecond ReceiverSettleMode = 1 -) - -// ReceiverSettleMode specifies how the receiver will settle messages. -type ReceiverSettleMode uint8 - -func (m ReceiverSettleMode) Ptr() *ReceiverSettleMode { - return &m -} - -func (m *ReceiverSettleMode) String() string { - if m == nil { - return "" - } - - switch *m { - case ReceiverSettleModeFirst: - return "first" - - case ReceiverSettleModeSecond: - return "second" - - default: - return fmt.Sprintf("unknown receiver mode %d", uint8(*m)) - } -} - -func (m ReceiverSettleMode) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, uint8(m)) -} - -func (m *ReceiverSettleMode) Unmarshal(r *buffer.Buffer) error { - n, err := ReadUbyte(r) - *m = ReceiverSettleMode(n) - return err -} - -type Role bool - -const ( - RoleSender Role = false - RoleReceiver Role = true -) - -func (rl Role) String() string { - if rl { - return "Receiver" - } - return "Sender" -} - -func (rl *Role) Unmarshal(r *buffer.Buffer) error { - b, err := readBool(r) - *rl = Role(b) - return err -} - -func (rl Role) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, (bool)(rl)) -} - -type SASLCode uint8 - -// SASL Codes -const ( - CodeSASLOK SASLCode = iota // Connection authentication succeeded. - CodeSASLAuth // Connection authentication failed due to an unspecified problem with the supplied credentials. - CodeSASLSysPerm // Connection authentication failed due to a system error that is unlikely to be corrected without intervention. -) - -func (s SASLCode) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, uint8(s)) -} - -func (s *SASLCode) Unmarshal(r *buffer.Buffer) error { - n, err := ReadUbyte(r) - *s = SASLCode(n) - return err -} - -// DeliveryState encapsulates the various concrete delivery states. 
-// http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#section-delivery-state -// TODO: http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-transactions-v1.0-os.html#type-declared -type DeliveryState interface { - deliveryState() // marker method -} - -type Unsettled map[string]DeliveryState - -func (u Unsettled) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, u) -} - -func (u *Unsettled) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - m := make(Unsettled, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadString(r) - if err != nil { - return err - } - var value DeliveryState - err = Unmarshal(r, &value) - if err != nil { - return err - } - m[key] = value - } - *u = m - return nil -} - -type Filter map[Symbol]*DescribedType - -func (f Filter) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, f) -} - -func (f *Filter) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - m := make(Filter, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadString(r) - if err != nil { - return err - } - var value DescribedType - err = Unmarshal(r, &value) - if err != nil { - return err - } - m[Symbol(key)] = &value - } - *f = m - return nil -} - -// peekMessageType reads the message type without -// modifying any data. 
-func PeekMessageType(buf []byte) (uint8, uint8, error) { - if len(buf) < 3 { - return 0, 0, errors.New("invalid message") - } - - if buf[0] != 0 { - return 0, 0, fmt.Errorf("invalid composite header %02x", buf[0]) - } - - // copied from readUlong to avoid allocations - t := AMQPType(buf[1]) - if t == TypeCodeUlong0 { - return 0, 2, nil - } - - if t == TypeCodeSmallUlong { - if len(buf[2:]) == 0 { - return 0, 0, errors.New("invalid ulong") - } - return buf[2], 3, nil - } - - if t != TypeCodeUlong { - return 0, 0, fmt.Errorf("invalid type for uint32 %02x", t) - } - - if len(buf[2:]) < 8 { - return 0, 0, errors.New("invalid ulong") - } - v := binary.BigEndian.Uint64(buf[2:10]) - - return uint8(v), 10, nil -} - -func tryReadNull(r *buffer.Buffer) bool { - if r.Len() > 0 && AMQPType(r.Bytes()[0]) == TypeCodeNull { - r.Skip(1) - return true - } - return false -} - -// Annotations keys must be of type string, int, or int64. -// -// String keys are encoded as AMQP Symbols. -type Annotations map[any]any - -func (a Annotations) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, a) -} - -func (a *Annotations) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - m := make(Annotations, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadAny(r) - if err != nil { - return err - } - value, err := ReadAny(r) - if err != nil { - return err - } - m[key] = value - } - *a = m - return nil -} - -// ErrCond is one of the error conditions defined in the AMQP spec. -type ErrCond string - -func (ec ErrCond) Marshal(wr *buffer.Buffer) error { - return (Symbol)(ec).Marshal(wr) -} - -func (ec *ErrCond) Unmarshal(r *buffer.Buffer) error { - s, err := ReadString(r) - *ec = ErrCond(s) - return err -} - -/* - - - - - - -*/ - -// Error is an AMQP error. -type Error struct { - // A symbolic value indicating the error condition. 
- Condition ErrCond - - // descriptive text about the error condition - // - // This text supplies any supplementary details not indicated by the condition field. - // This text can be logged as an aid to resolving issues. - Description string - - // map carrying information about the error condition - Info map[string]any -} - -func (e *Error) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeError, []MarshalField{ - {Value: &e.Condition, Omit: false}, - {Value: &e.Description, Omit: e.Description == ""}, - {Value: e.Info, Omit: len(e.Info) == 0}, - }) -} - -func (e *Error) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeError, []UnmarshalField{ - {Field: &e.Condition, HandleNull: func() error { return errors.New("Error.Condition is required") }}, - {Field: &e.Description}, - {Field: &e.Info}, - }...) -} - -func (e *Error) String() string { - if e == nil { - return "*Error(nil)" - } - return fmt.Sprintf("*Error{Condition: %s, Description: %s, Info: %v}", - e.Condition, - e.Description, - e.Info, - ) -} - -func (e *Error) Error() string { - return e.String() -} - -/* - - - - - -*/ - -type StateReceived struct { - // When sent by the sender this indicates the first section of the message - // (with section-number 0 being the first section) for which data can be resent. - // Data from sections prior to the given section cannot be retransmitted for - // this delivery. - // - // When sent by the receiver this indicates the first section of the message - // for which all data might not yet have been received. - SectionNumber uint32 - - // When sent by the sender this indicates the first byte of the encoded section - // data of the section given by section-number for which data can be resent - // (with section-offset 0 being the first byte). Bytes from the same section - // prior to the given offset section cannot be retransmitted for this delivery. 
- // - // When sent by the receiver this indicates the first byte of the given section - // which has not yet been received. Note that if a receiver has received all of - // section number X (which contains N bytes of data), but none of section number - // X + 1, then it can indicate this by sending either Received(section-number=X, - // section-offset=N) or Received(section-number=X+1, section-offset=0). The state - // Received(section-number=0, section-offset=0) indicates that no message data - // at all has been transferred. - SectionOffset uint64 -} - -func (sr *StateReceived) deliveryState() {} - -func (sr *StateReceived) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateReceived, []MarshalField{ - {Value: &sr.SectionNumber, Omit: false}, - {Value: &sr.SectionOffset, Omit: false}, - }) -} - -func (sr *StateReceived) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateReceived, []UnmarshalField{ - {Field: &sr.SectionNumber, HandleNull: func() error { return errors.New("StateReceiver.SectionNumber is required") }}, - {Field: &sr.SectionOffset, HandleNull: func() error { return errors.New("StateReceiver.SectionOffset is required") }}, - }...) 
-} - -/* - - - -*/ - -type StateAccepted struct{} - -func (sr *StateAccepted) deliveryState() {} - -func (sa *StateAccepted) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateAccepted, nil) -} - -func (sa *StateAccepted) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateAccepted) -} - -func (sa *StateAccepted) String() string { - return "Accepted" -} - -/* - - - - -*/ - -type StateRejected struct { - Error *Error -} - -func (sr *StateRejected) deliveryState() {} - -func (sr *StateRejected) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateRejected, []MarshalField{ - {Value: sr.Error, Omit: sr.Error == nil}, - }) -} - -func (sr *StateRejected) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateRejected, - UnmarshalField{Field: &sr.Error}, - ) -} - -func (sr *StateRejected) String() string { - return fmt.Sprintf("Rejected{Error: %v}", sr.Error) -} - -/* - - - -*/ - -type StateReleased struct{} - -func (sr *StateReleased) deliveryState() {} - -func (sr *StateReleased) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateReleased, nil) -} - -func (sr *StateReleased) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateReleased) -} - -func (sr *StateReleased) String() string { - return "Released" -} - -/* - - - - - - -*/ - -type StateModified struct { - // count the transfer as an unsuccessful delivery attempt - // - // If the delivery-failed flag is set, any messages modified - // MUST have their delivery-count incremented. - DeliveryFailed bool - - // prevent redelivery - // - // If the undeliverable-here is set, then any messages released MUST NOT - // be redelivered to the modifying link endpoint. - UndeliverableHere bool - - // message attributes - // Map containing attributes to combine with the existing message-annotations - // held in the message's header section. 
Where the existing message-annotations - // of the message contain an entry with the same key as an entry in this field, - // the value in this field associated with that key replaces the one in the - // existing headers; where the existing message-annotations has no such value, - // the value in this map is added. - MessageAnnotations Annotations -} - -func (sr *StateModified) deliveryState() {} - -func (sm *StateModified) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateModified, []MarshalField{ - {Value: &sm.DeliveryFailed, Omit: !sm.DeliveryFailed}, - {Value: &sm.UndeliverableHere, Omit: !sm.UndeliverableHere}, - {Value: sm.MessageAnnotations, Omit: sm.MessageAnnotations == nil}, - }) -} - -func (sm *StateModified) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateModified, []UnmarshalField{ - {Field: &sm.DeliveryFailed}, - {Field: &sm.UndeliverableHere}, - {Field: &sm.MessageAnnotations}, - }...) -} - -func (sm *StateModified) String() string { - return fmt.Sprintf("Modified{DeliveryFailed: %t, UndeliverableHere: %t, MessageAnnotations: %v}", sm.DeliveryFailed, sm.UndeliverableHere, sm.MessageAnnotations) -} - -// symbol is an AMQP symbolic string. 
-type Symbol string - -func (s Symbol) Marshal(wr *buffer.Buffer) error { - l := len(s) - switch { - // Sym8 - case l < 256: - wr.Append([]byte{ - byte(TypeCodeSym8), - byte(l), - }) - wr.AppendString(string(s)) - - // Sym32 - case uint(l) < math.MaxUint32: - wr.AppendByte(uint8(TypeCodeSym32)) - wr.AppendUint32(uint32(l)) - wr.AppendString(string(s)) - default: - return errors.New("too long") - } - return nil -} - -type Milliseconds time.Duration - -func (m Milliseconds) Marshal(wr *buffer.Buffer) error { - writeUint32(wr, uint32(m/Milliseconds(time.Millisecond))) - return nil -} - -func (m *Milliseconds) Unmarshal(r *buffer.Buffer) error { - n, err := readUint(r) - *m = Milliseconds(time.Duration(n) * time.Millisecond) - return err -} - -// mapAnyAny is used to decode AMQP maps who's keys are undefined or -// inconsistently typed. -type mapAnyAny map[any]any - -func (m mapAnyAny) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, map[any]any(m)) -} - -func (m *mapAnyAny) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - mm := make(mapAnyAny, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadAny(r) - if err != nil { - return err - } - value, err := ReadAny(r) - if err != nil { - return err - } - - // https://golang.org/ref/spec#Map_types: - // The comparison operators == and != must be fully defined - // for operands of the key type; thus the key type must not - // be a function, map, or slice. 
- switch reflect.ValueOf(key).Kind() { - case reflect.Slice, reflect.Func, reflect.Map: - return errors.New("invalid map key") - } - - mm[key] = value - } - *m = mm - return nil -} - -// mapStringAny is used to decode AMQP maps that have string keys -type mapStringAny map[string]any - -func (m mapStringAny) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, map[string]any(m)) -} - -func (m *mapStringAny) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - mm := make(mapStringAny, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadString(r) - if err != nil { - return err - } - value, err := ReadAny(r) - if err != nil { - return err - } - mm[key] = value - } - *m = mm - - return nil -} - -// mapStringAny is used to decode AMQP maps that have Symbol keys -type mapSymbolAny map[Symbol]any - -func (m mapSymbolAny) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, map[Symbol]any(m)) -} - -func (m *mapSymbolAny) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - mm := make(mapSymbolAny, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadString(r) - if err != nil { - return err - } - value, err := ReadAny(r) - if err != nil { - return err - } - mm[Symbol(key)] = value - } - *m = mm - return nil -} - -// UUID is a 128 bit identifier as defined in RFC 4122. -type UUID [16]byte - -// String returns the hex encoded representation described in RFC 4122, Section 3. 
-func (u UUID) String() string { - var buf [36]byte - hex.Encode(buf[:8], u[:4]) - buf[8] = '-' - hex.Encode(buf[9:13], u[4:6]) - buf[13] = '-' - hex.Encode(buf[14:18], u[6:8]) - buf[18] = '-' - hex.Encode(buf[19:23], u[8:10]) - buf[23] = '-' - hex.Encode(buf[24:], u[10:]) - return string(buf[:]) -} - -func (u UUID) Marshal(wr *buffer.Buffer) error { - wr.AppendByte(byte(TypeCodeUUID)) - wr.Append(u[:]) - return nil -} - -func (u *UUID) Unmarshal(r *buffer.Buffer) error { - un, err := readUUID(r) - *u = un - return err -} - -type LifetimePolicy uint8 - -const ( - DeleteOnClose = LifetimePolicy(TypeCodeDeleteOnClose) - DeleteOnNoLinks = LifetimePolicy(TypeCodeDeleteOnNoLinks) - DeleteOnNoMessages = LifetimePolicy(TypeCodeDeleteOnNoMessages) - DeleteOnNoLinksOrMessages = LifetimePolicy(TypeCodeDeleteOnNoLinksOrMessages) -) - -func (p LifetimePolicy) Marshal(wr *buffer.Buffer) error { - wr.Append([]byte{ - 0x0, - byte(TypeCodeSmallUlong), - byte(p), - byte(TypeCodeList0), - }) - return nil -} - -func (p *LifetimePolicy) Unmarshal(r *buffer.Buffer) error { - typ, fields, err := readCompositeHeader(r) - if err != nil { - return err - } - if fields != 0 { - return fmt.Errorf("invalid size %d for lifetime-policy", fields) - } - *p = LifetimePolicy(typ) - return nil -} - -type DescribedType struct { - Descriptor any - Value any -} - -func (t DescribedType) Marshal(wr *buffer.Buffer) error { - wr.AppendByte(0x0) // descriptor constructor - err := Marshal(wr, t.Descriptor) - if err != nil { - return err - } - return Marshal(wr, t.Value) -} - -func (t *DescribedType) Unmarshal(r *buffer.Buffer) error { - b, err := r.ReadByte() - if err != nil { - return err - } - - if b != 0x0 { - return fmt.Errorf("invalid described type header %02x", b) - } - - err = Unmarshal(r, &t.Descriptor) - if err != nil { - return err - } - return Unmarshal(r, &t.Value) -} - -func (t DescribedType) String() string { - return fmt.Sprintf("DescribedType{descriptor: %v, value: %v}", - t.Descriptor, - 
t.Value, - ) -} - -// SLICES - -// ArrayUByte allows encoding []uint8/[]byte as an array -// rather than binary data. -type ArrayUByte []uint8 - -func (a ArrayUByte) Marshal(wr *buffer.Buffer) error { - const typeSize = 1 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeUbyte) - wr.Append(a) - - return nil -} - -func (a *ArrayUByte) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeUbyte { - return fmt.Errorf("invalid type for []uint16 %02x", type_) - } - - buf, ok := r.Next(length) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - *a = append([]byte(nil), buf...) - - return nil -} - -type arrayInt8 []int8 - -func (a arrayInt8) Marshal(wr *buffer.Buffer) error { - const typeSize = 1 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeByte) - - for _, value := range a { - wr.AppendByte(uint8(value)) - } - - return nil -} - -func (a *arrayInt8) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeByte { - return fmt.Errorf("invalid type for []uint16 %02x", type_) - } - - buf, ok := r.Next(length) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]int8, length) - } else { - aa = aa[:length] - } - - for i, value := range buf { - aa[i] = int8(value) - } - - *a = aa - return nil -} - -type arrayUint16 []uint16 - -func (a arrayUint16) Marshal(wr *buffer.Buffer) error { - const typeSize = 2 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeUshort) - - for _, element := range a { - wr.AppendUint16(element) - } - - return nil -} - -func (a *arrayUint16) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != 
nil { - return err - } - if type_ != TypeCodeUshort { - return fmt.Errorf("invalid type for []uint16 %02x", type_) - } - - const typeSize = 2 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]uint16, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = binary.BigEndian.Uint16(buf[bufIdx:]) - bufIdx += 2 - } - - *a = aa - return nil -} - -type arrayInt16 []int16 - -func (a arrayInt16) Marshal(wr *buffer.Buffer) error { - const typeSize = 2 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeShort) - - for _, element := range a { - wr.AppendUint16(uint16(element)) - } - - return nil -} - -func (a *arrayInt16) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeShort { - return fmt.Errorf("invalid type for []uint16 %02x", type_) - } - - const typeSize = 2 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]int16, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = int16(binary.BigEndian.Uint16(buf[bufIdx : bufIdx+2])) - bufIdx += 2 - } - - *a = aa - return nil -} - -type arrayUint32 []uint32 - -func (a arrayUint32) Marshal(wr *buffer.Buffer) error { - var ( - typeSize = 1 - TypeCode = TypeCodeSmallUint - ) - for _, n := range a { - if n > math.MaxUint8 { - typeSize = 4 - TypeCode = TypeCodeUint - break - } - } - - writeArrayHeader(wr, len(a), typeSize, TypeCode) - - if TypeCode == TypeCodeUint { - for _, element := range a { - wr.AppendUint32(element) - } - } else { - for _, element := range a { - wr.AppendByte(byte(element)) - } - } - - return nil -} - -func (a *arrayUint32) Unmarshal(r *buffer.Buffer) error { - length, 
err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeUint0: - if int64(cap(aa)) < length { - aa = make([]uint32, length) - } else { - aa = aa[:length] - for i := range aa { - aa[i] = 0 - } - } - case TypeCodeSmallUint: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]uint32, length) - } else { - aa = aa[:length] - } - - for i, n := range buf { - aa[i] = uint32(n) - } - case TypeCodeUint: - const typeSize = 4 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - if int64(cap(aa)) < length { - aa = make([]uint32, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = binary.BigEndian.Uint32(buf[bufIdx : bufIdx+4]) - bufIdx += 4 - } - default: - return fmt.Errorf("invalid type for []uint32 %02x", type_) - } - - *a = aa - return nil -} - -type arrayInt32 []int32 - -func (a arrayInt32) Marshal(wr *buffer.Buffer) error { - var ( - typeSize = 1 - TypeCode = TypeCodeSmallint - ) - for _, n := range a { - if n > math.MaxInt8 { - typeSize = 4 - TypeCode = TypeCodeInt - break - } - } - - writeArrayHeader(wr, len(a), typeSize, TypeCode) - - if TypeCode == TypeCodeInt { - for _, element := range a { - wr.AppendUint32(uint32(element)) - } - } else { - for _, element := range a { - wr.AppendByte(byte(element)) - } - } - - return nil -} - -func (a *arrayInt32) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeSmallint: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]int32, length) - } else { - aa = aa[:length] - } - - for i, n := range buf { - 
aa[i] = int32(int8(n)) - } - case TypeCodeInt: - const typeSize = 4 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - if int64(cap(aa)) < length { - aa = make([]int32, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = int32(binary.BigEndian.Uint32(buf[bufIdx:])) - bufIdx += 4 - } - default: - return fmt.Errorf("invalid type for []int32 %02x", type_) - } - - *a = aa - return nil -} - -type arrayUint64 []uint64 - -func (a arrayUint64) Marshal(wr *buffer.Buffer) error { - var ( - typeSize = 1 - TypeCode = TypeCodeSmallUlong - ) - for _, n := range a { - if n > math.MaxUint8 { - typeSize = 8 - TypeCode = TypeCodeUlong - break - } - } - - writeArrayHeader(wr, len(a), typeSize, TypeCode) - - if TypeCode == TypeCodeUlong { - for _, element := range a { - wr.AppendUint64(element) - } - } else { - for _, element := range a { - wr.AppendByte(byte(element)) - } - } - - return nil -} - -func (a *arrayUint64) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeUlong0: - if int64(cap(aa)) < length { - aa = make([]uint64, length) - } else { - aa = aa[:length] - for i := range aa { - aa[i] = 0 - } - } - case TypeCodeSmallUlong: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]uint64, length) - } else { - aa = aa[:length] - } - - for i, n := range buf { - aa[i] = uint64(n) - } - case TypeCodeUlong: - const typeSize = 8 - buf, ok := r.Next(length * typeSize) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]uint64, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = binary.BigEndian.Uint64(buf[bufIdx : bufIdx+8]) - bufIdx += 8 - } - default: - return 
fmt.Errorf("invalid type for []uint64 %02x", type_) - } - - *a = aa - return nil -} - -type arrayInt64 []int64 - -func (a arrayInt64) Marshal(wr *buffer.Buffer) error { - var ( - typeSize = 1 - TypeCode = TypeCodeSmalllong - ) - for _, n := range a { - if n > math.MaxInt8 { - typeSize = 8 - TypeCode = TypeCodeLong - break - } - } - - writeArrayHeader(wr, len(a), typeSize, TypeCode) - - if TypeCode == TypeCodeLong { - for _, element := range a { - wr.AppendUint64(uint64(element)) - } - } else { - for _, element := range a { - wr.AppendByte(byte(element)) - } - } - - return nil -} - -func (a *arrayInt64) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeSmalllong: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]int64, length) - } else { - aa = aa[:length] - } - - for i, n := range buf { - aa[i] = int64(int8(n)) - } - case TypeCodeLong: - const typeSize = 8 - buf, ok := r.Next(length * typeSize) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]int64, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = int64(binary.BigEndian.Uint64(buf[bufIdx:])) - bufIdx += 8 - } - default: - return fmt.Errorf("invalid type for []uint64 %02x", type_) - } - - *a = aa - return nil -} - -type arrayFloat []float32 - -func (a arrayFloat) Marshal(wr *buffer.Buffer) error { - const typeSize = 4 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeFloat) - - for _, element := range a { - wr.AppendUint32(math.Float32bits(element)) - } - - return nil -} - -func (a *arrayFloat) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ 
!= TypeCodeFloat { - return fmt.Errorf("invalid type for []float32 %02x", type_) - } - - const typeSize = 4 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]float32, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - bits := binary.BigEndian.Uint32(buf[bufIdx:]) - aa[i] = math.Float32frombits(bits) - bufIdx += typeSize - } - - *a = aa - return nil -} - -type arrayDouble []float64 - -func (a arrayDouble) Marshal(wr *buffer.Buffer) error { - const typeSize = 8 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeDouble) - - for _, element := range a { - wr.AppendUint64(math.Float64bits(element)) - } - - return nil -} - -func (a *arrayDouble) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeDouble { - return fmt.Errorf("invalid type for []float64 %02x", type_) - } - - const typeSize = 8 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]float64, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - bits := binary.BigEndian.Uint64(buf[bufIdx:]) - aa[i] = math.Float64frombits(bits) - bufIdx += typeSize - } - - *a = aa - return nil -} - -type arrayBool []bool - -func (a arrayBool) Marshal(wr *buffer.Buffer) error { - const typeSize = 1 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeBool) - - for _, element := range a { - value := byte(0) - if element { - value = 1 - } - wr.AppendByte(value) - } - - return nil -} - -func (a *arrayBool) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]bool, length) - } else { - aa = 
aa[:length] - } - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeBool: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - for i, value := range buf { - if value == 0 { - aa[i] = false - } else { - aa[i] = true - } - } - - case TypeCodeBoolTrue: - for i := range aa { - aa[i] = true - } - case TypeCodeBoolFalse: - for i := range aa { - aa[i] = false - } - default: - return fmt.Errorf("invalid type for []bool %02x", type_) - } - - *a = aa - return nil -} - -type arrayString []string - -func (a arrayString) Marshal(wr *buffer.Buffer) error { - var ( - elementType = TypeCodeStr8 - elementsSizeTotal int - ) - for _, element := range a { - if !utf8.ValidString(element) { - return errors.New("not a valid UTF-8 string") - } - - elementsSizeTotal += len(element) - - if len(element) > math.MaxUint8 { - elementType = TypeCodeStr32 - } - } - - writeVariableArrayHeader(wr, len(a), elementsSizeTotal, elementType) - - if elementType == TypeCodeStr32 { - for _, element := range a { - wr.AppendUint32(uint32(len(element))) - wr.AppendString(element) - } - } else { - for _, element := range a { - wr.AppendByte(byte(len(element))) - wr.AppendString(element) - } - } - - return nil -} - -func (a *arrayString) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - const typeSize = 2 // assume all strings are at least 2 bytes - if length*typeSize > int64(r.Len()) { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]string, length) - } else { - aa = aa[:length] - } - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeStr8: - for i := range aa { - size, err := r.ReadByte() - if err != nil { - return err - } - - buf, ok := r.Next(int64(size)) - if !ok { - return errors.New("invalid length") - } - - aa[i] = string(buf) - } - case TypeCodeStr32: - for 
i := range aa { - buf, ok := r.Next(4) - if !ok { - return errors.New("invalid length") - } - size := int64(binary.BigEndian.Uint32(buf)) - - buf, ok = r.Next(size) - if !ok { - return errors.New("invalid length") - } - aa[i] = string(buf) - } - default: - return fmt.Errorf("invalid type for []string %02x", type_) - } - - *a = aa - return nil -} - -type arraySymbol []Symbol - -func (a arraySymbol) Marshal(wr *buffer.Buffer) error { - var ( - elementType = TypeCodeSym8 - elementsSizeTotal int - ) - for _, element := range a { - elementsSizeTotal += len(element) - - if len(element) > math.MaxUint8 { - elementType = TypeCodeSym32 - } - } - - writeVariableArrayHeader(wr, len(a), elementsSizeTotal, elementType) - - if elementType == TypeCodeSym32 { - for _, element := range a { - wr.AppendUint32(uint32(len(element))) - wr.AppendString(string(element)) - } - } else { - for _, element := range a { - wr.AppendByte(byte(len(element))) - wr.AppendString(string(element)) - } - } - - return nil -} - -func (a *arraySymbol) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - const typeSize = 2 // assume all symbols are at least 2 bytes - if length*typeSize > int64(r.Len()) { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]Symbol, length) - } else { - aa = aa[:length] - } - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeSym8: - for i := range aa { - size, err := r.ReadByte() - if err != nil { - return err - } - - buf, ok := r.Next(int64(size)) - if !ok { - return errors.New("invalid length") - } - aa[i] = Symbol(buf) - } - case TypeCodeSym32: - for i := range aa { - buf, ok := r.Next(4) - if !ok { - return errors.New("invalid length") - } - size := int64(binary.BigEndian.Uint32(buf)) - - buf, ok = r.Next(size) - if !ok { - return errors.New("invalid length") - } - aa[i] = Symbol(buf) - } - default: - return 
fmt.Errorf("invalid type for []Symbol %02x", type_) - } - - *a = aa - return nil -} - -type arrayBinary [][]byte - -func (a arrayBinary) Marshal(wr *buffer.Buffer) error { - var ( - elementType = TypeCodeVbin8 - elementsSizeTotal int - ) - for _, element := range a { - elementsSizeTotal += len(element) - - if len(element) > math.MaxUint8 { - elementType = TypeCodeVbin32 - } - } - - writeVariableArrayHeader(wr, len(a), elementsSizeTotal, elementType) - - if elementType == TypeCodeVbin32 { - for _, element := range a { - wr.AppendUint32(uint32(len(element))) - wr.Append(element) - } - } else { - for _, element := range a { - wr.AppendByte(byte(len(element))) - wr.Append(element) - } - } - - return nil -} - -func (a *arrayBinary) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - const typeSize = 2 // assume all binary is at least 2 bytes - if length*typeSize > int64(r.Len()) { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([][]byte, length) - } else { - aa = aa[:length] - } - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeVbin8: - for i := range aa { - size, err := r.ReadByte() - if err != nil { - return err - } - - buf, ok := r.Next(int64(size)) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - aa[i] = append([]byte(nil), buf...) - } - case TypeCodeVbin32: - for i := range aa { - buf, ok := r.Next(4) - if !ok { - return errors.New("invalid length") - } - size := binary.BigEndian.Uint32(buf) - - buf, ok = r.Next(int64(size)) - if !ok { - return errors.New("invalid length") - } - aa[i] = append([]byte(nil), buf...) 
- } - default: - return fmt.Errorf("invalid type for [][]byte %02x", type_) - } - - *a = aa - return nil -} - -type arrayTimestamp []time.Time - -func (a arrayTimestamp) Marshal(wr *buffer.Buffer) error { - const typeSize = 8 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeTimestamp) - - for _, element := range a { - ms := element.UnixNano() / int64(time.Millisecond) - wr.AppendUint64(uint64(ms)) - } - - return nil -} - -func (a *arrayTimestamp) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeTimestamp { - return fmt.Errorf("invalid type for []time.Time %02x", type_) - } - - const typeSize = 8 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]time.Time, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - ms := int64(binary.BigEndian.Uint64(buf[bufIdx:])) - bufIdx += typeSize - aa[i] = time.Unix(ms/1000, (ms%1000)*1000000).UTC() - } - - *a = aa - return nil -} - -type arrayUUID []UUID - -func (a arrayUUID) Marshal(wr *buffer.Buffer) error { - const typeSize = 16 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeUUID) - - for _, element := range a { - wr.Append(element[:]) - } - - return nil -} - -func (a *arrayUUID) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeUUID { - return fmt.Errorf("invalid type for []UUID %#02x", type_) - } - - const typeSize = 16 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]UUID, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - copy(aa[i][:], 
buf[bufIdx:bufIdx+16]) - bufIdx += 16 - } - - *a = aa - return nil -} - -// LIST - -type list []any - -func (l list) Marshal(wr *buffer.Buffer) error { - length := len(l) - - // type - if length == 0 { - wr.AppendByte(byte(TypeCodeList0)) - return nil - } - wr.AppendByte(byte(TypeCodeList32)) - - // size - sizeIdx := wr.Len() - wr.Append([]byte{0, 0, 0, 0}) - - // length - wr.AppendUint32(uint32(length)) - - for _, element := range l { - err := Marshal(wr, element) - if err != nil { - return err - } - } - - // overwrite size - binary.BigEndian.PutUint32(wr.Bytes()[sizeIdx:], uint32(wr.Len()-(sizeIdx+4))) - - return nil -} - -func (l *list) Unmarshal(r *buffer.Buffer) error { - length, err := readListHeader(r) - if err != nil { - return err - } - - // assume that all types are at least 1 byte - if length > int64(r.Len()) { - return fmt.Errorf("invalid length %d", length) - } - - ll := *l - if int64(cap(ll)) < length { - ll = make([]any, length) - } else { - ll = ll[:length] - } - - for i := range ll { - ll[i], err = ReadAny(r) - if err != nil { - return err - } - } - - *l = ll - return nil -} - -// multiSymbol can decode a single symbol or an array. 
-type MultiSymbol []Symbol - -func (ms MultiSymbol) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, []Symbol(ms)) -} - -func (ms *MultiSymbol) Unmarshal(r *buffer.Buffer) error { - type_, err := peekType(r) - if err != nil { - return err - } - - if type_ == TypeCodeSym8 || type_ == TypeCodeSym32 { - s, err := ReadString(r) - if err != nil { - return err - } - - *ms = []Symbol{Symbol(s)} - return nil - } - - return Unmarshal(r, (*[]Symbol)(ms)) -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames/frames.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames/frames.go deleted file mode 100644 index 63491582a85d..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames/frames.go +++ /dev/null @@ -1,1543 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package frames - -import ( - "errors" - "fmt" - "strconv" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" -) - -// Type contains the values for a frame's type. -type Type uint8 - -const ( - TypeAMQP Type = 0x0 - TypeSASL Type = 0x1 -) - -// String implements the fmt.Stringer interface for type Type. -func (t Type) String() string { - if t == 0 { - return "AMQP" - } - return "SASL" -} - -/* - - - - - - - - - - - - - - - - -*/ -type Source struct { - // the address of the source - // - // The address of the source MUST NOT be set when sent on a attach frame sent by - // the receiving link endpoint where the dynamic flag is set to true (that is where - // the receiver is requesting the sender to create an addressable node). 
- // - // The address of the source MUST be set when sent on a attach frame sent by the - // sending link endpoint where the dynamic flag is set to true (that is where the - // sender has created an addressable node at the request of the receiver and is now - // communicating the address of that created node). The generated name of the address - // SHOULD include the link name and the container-id of the remote container to allow - // for ease of identification. - Address string - - // indicates the durability of the terminus - // - // Indicates what state of the terminus will be retained durably: the state of durable - // messages, only existence and configuration of the terminus, or no state at all. - // - // 0: none - // 1: configuration - // 2: unsettled-state - Durable encoding.Durability - - // the expiry policy of the source - // - // link-detach: The expiry timer starts when terminus is detached. - // session-end: The expiry timer starts when the most recently associated session is - // ended. - // connection-close: The expiry timer starts when most recently associated connection - // is closed. - // never: The terminus never expires. - ExpiryPolicy encoding.ExpiryPolicy - - // duration that an expiring source will be retained - // - // The source starts expiring as indicated by the expiry-policy. - Timeout uint32 // seconds - - // request dynamic creation of a remote node - // - // When set to true by the receiving link endpoint, this field constitutes a request - // for the sending peer to dynamically create a node at the source. In this case the - // address field MUST NOT be set. - // - // When set to true by the sending link endpoint this field indicates creation of a - // dynamically created node. In this case the address field will contain the address - // of the created node. 
The generated address SHOULD include the link name and other - // available information on the initiator of the request (such as the remote - // container-id) in some recognizable form for ease of traceability. - Dynamic bool - - // properties of the dynamically created node - // - // If the dynamic field is not set to true this field MUST be left unset. - // - // When set by the receiving link endpoint, this field contains the desired - // properties of the node the receiver wishes to be created. When set by the - // sending link endpoint this field contains the actual properties of the - // dynamically created node. See subsection 3.5.9 for standard node properties. - // http://www.amqp.org/specification/1.0/node-properties - // - // lifetime-policy: The lifetime of a dynamically generated node. - // Definitionally, the lifetime will never be less than the lifetime - // of the link which caused its creation, however it is possible to - // extend the lifetime of dynamically created node using a lifetime - // policy. The value of this entry MUST be of a type which provides - // the lifetime-policy archetype. The following standard - // lifetime-policies are defined below: delete-on-close, - // delete-on-no-links, delete-on-no-messages or - // delete-on-no-links-or-messages. - // supported-dist-modes: The distribution modes that the node supports. - // The value of this entry MUST be one or more symbols which are valid - // distribution-modes. That is, the value MUST be of the same type as - // would be valid in a field defined with the following attributes: - // type="symbol" multiple="true" requires="distribution-mode" - DynamicNodeProperties map[encoding.Symbol]any // TODO: implement custom type with validation - - // the distribution mode of the link - // - // This field MUST be set by the sending end of the link if the endpoint supports more - // than one distribution-mode. 
This field MAY be set by the receiving end of the link - // to indicate a preference when a node supports multiple distribution modes. - DistributionMode encoding.Symbol - - // a set of predicates to filter the messages admitted onto the link - // - // The receiving endpoint sets its desired filter, the sending endpoint sets the filter - // actually in place (including any filters defaulted at the node). The receiving - // endpoint MUST check that the filter in place meets its needs and take responsibility - // for detaching if it does not. - Filter encoding.Filter - - // default outcome for unsettled transfers - // - // Indicates the outcome to be used for transfers that have not reached a terminal - // state at the receiver when the transfer is settled, including when the source - // is destroyed. The value MUST be a valid outcome (e.g., released or rejected). - DefaultOutcome any - - // descriptors for the outcomes that can be chosen on this link - // - // The values in this field are the symbolic descriptors of the outcomes that can - // be chosen on this link. This field MAY be empty, indicating that the default-outcome - // will be assumed for all message transfers (if the default-outcome is not set, and no - // outcomes are provided, then the accepted outcome MUST be supported by the source). - // - // When present, the values MUST be a symbolic descriptor of a valid outcome, - // e.g., "amqp:accepted:list". 
- Outcomes encoding.MultiSymbol - - // the extension capabilities the sender supports/desires - // - // http://www.amqp.org/specification/1.0/source-capabilities - Capabilities encoding.MultiSymbol -} - -func (s *Source) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSource, []encoding.MarshalField{ - {Value: &s.Address, Omit: s.Address == ""}, - {Value: &s.Durable, Omit: s.Durable == encoding.DurabilityNone}, - {Value: &s.ExpiryPolicy, Omit: s.ExpiryPolicy == "" || s.ExpiryPolicy == encoding.ExpirySessionEnd}, - {Value: &s.Timeout, Omit: s.Timeout == 0}, - {Value: &s.Dynamic, Omit: !s.Dynamic}, - {Value: s.DynamicNodeProperties, Omit: len(s.DynamicNodeProperties) == 0}, - {Value: &s.DistributionMode, Omit: s.DistributionMode == ""}, - {Value: s.Filter, Omit: len(s.Filter) == 0}, - {Value: &s.DefaultOutcome, Omit: s.DefaultOutcome == nil}, - {Value: &s.Outcomes, Omit: len(s.Outcomes) == 0}, - {Value: &s.Capabilities, Omit: len(s.Capabilities) == 0}, - }) -} - -func (s *Source) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSource, []encoding.UnmarshalField{ - {Field: &s.Address}, - {Field: &s.Durable}, - {Field: &s.ExpiryPolicy, HandleNull: func() error { s.ExpiryPolicy = encoding.ExpirySessionEnd; return nil }}, - {Field: &s.Timeout}, - {Field: &s.Dynamic}, - {Field: &s.DynamicNodeProperties}, - {Field: &s.DistributionMode}, - {Field: &s.Filter}, - {Field: &s.DefaultOutcome}, - {Field: &s.Outcomes}, - {Field: &s.Capabilities}, - }...) 
-} - -func (s Source) String() string { - return fmt.Sprintf("source{Address: %s, Durable: %d, ExpiryPolicy: %s, Timeout: %d, "+ - "Dynamic: %t, DynamicNodeProperties: %v, DistributionMode: %s, Filter: %v, DefaultOutcome: %v "+ - "Outcomes: %v, Capabilities: %v}", - s.Address, - s.Durable, - s.ExpiryPolicy, - s.Timeout, - s.Dynamic, - s.DynamicNodeProperties, - s.DistributionMode, - s.Filter, - s.DefaultOutcome, - s.Outcomes, - s.Capabilities, - ) -} - -/* - - - - - - - - - - - - -*/ -type Target struct { - // the address of the target - // - // The address of the target MUST NOT be set when sent on a attach frame sent by - // the sending link endpoint where the dynamic flag is set to true (that is where - // the sender is requesting the receiver to create an addressable node). - // - // The address of the source MUST be set when sent on a attach frame sent by the - // receiving link endpoint where the dynamic flag is set to true (that is where - // the receiver has created an addressable node at the request of the sender and - // is now communicating the address of that created node). The generated name of - // the address SHOULD include the link name and the container-id of the remote - // container to allow for ease of identification. - Address string - - // indicates the durability of the terminus - // - // Indicates what state of the terminus will be retained durably: the state of durable - // messages, only existence and configuration of the terminus, or no state at all. - // - // 0: none - // 1: configuration - // 2: unsettled-state - Durable encoding.Durability - - // the expiry policy of the target - // - // link-detach: The expiry timer starts when terminus is detached. - // session-end: The expiry timer starts when the most recently associated session is - // ended. - // connection-close: The expiry timer starts when most recently associated connection - // is closed. - // never: The terminus never expires. 
- ExpiryPolicy encoding.ExpiryPolicy - - // duration that an expiring target will be retained - // - // The target starts expiring as indicated by the expiry-policy. - Timeout uint32 // seconds - - // request dynamic creation of a remote node - // - // When set to true by the sending link endpoint, this field constitutes a request - // for the receiving peer to dynamically create a node at the target. In this case - // the address field MUST NOT be set. - // - // When set to true by the receiving link endpoint this field indicates creation of - // a dynamically created node. In this case the address field will contain the - // address of the created node. The generated address SHOULD include the link name - // and other available information on the initiator of the request (such as the - // remote container-id) in some recognizable form for ease of traceability. - Dynamic bool - - // properties of the dynamically created node - // - // If the dynamic field is not set to true this field MUST be left unset. - // - // When set by the sending link endpoint, this field contains the desired - // properties of the node the sender wishes to be created. When set by the - // receiving link endpoint this field contains the actual properties of the - // dynamically created node. See subsection 3.5.9 for standard node properties. - // http://www.amqp.org/specification/1.0/node-properties - // - // lifetime-policy: The lifetime of a dynamically generated node. - // Definitionally, the lifetime will never be less than the lifetime - // of the link which caused its creation, however it is possible to - // extend the lifetime of dynamically created node using a lifetime - // policy. The value of this entry MUST be of a type which provides - // the lifetime-policy archetype. The following standard - // lifetime-policies are defined below: delete-on-close, - // delete-on-no-links, delete-on-no-messages or - // delete-on-no-links-or-messages. 
- // supported-dist-modes: The distribution modes that the node supports. - // The value of this entry MUST be one or more symbols which are valid - // distribution-modes. That is, the value MUST be of the same type as - // would be valid in a field defined with the following attributes: - // type="symbol" multiple="true" requires="distribution-mode" - DynamicNodeProperties map[encoding.Symbol]any // TODO: implement custom type with validation - - // the extension capabilities the sender supports/desires - // - // http://www.amqp.org/specification/1.0/target-capabilities - Capabilities encoding.MultiSymbol -} - -func (t *Target) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeTarget, []encoding.MarshalField{ - {Value: &t.Address, Omit: t.Address == ""}, - {Value: &t.Durable, Omit: t.Durable == encoding.DurabilityNone}, - {Value: &t.ExpiryPolicy, Omit: t.ExpiryPolicy == "" || t.ExpiryPolicy == encoding.ExpirySessionEnd}, - {Value: &t.Timeout, Omit: t.Timeout == 0}, - {Value: &t.Dynamic, Omit: !t.Dynamic}, - {Value: t.DynamicNodeProperties, Omit: len(t.DynamicNodeProperties) == 0}, - {Value: &t.Capabilities, Omit: len(t.Capabilities) == 0}, - }) -} - -func (t *Target) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeTarget, []encoding.UnmarshalField{ - {Field: &t.Address}, - {Field: &t.Durable}, - {Field: &t.ExpiryPolicy, HandleNull: func() error { t.ExpiryPolicy = encoding.ExpirySessionEnd; return nil }}, - {Field: &t.Timeout}, - {Field: &t.Dynamic}, - {Field: &t.DynamicNodeProperties}, - {Field: &t.Capabilities}, - }...) 
-} - -func (t Target) String() string { - return fmt.Sprintf("source{Address: %s, Durable: %d, ExpiryPolicy: %s, Timeout: %d, "+ - "Dynamic: %t, DynamicNodeProperties: %v, Capabilities: %v}", - t.Address, - t.Durable, - t.ExpiryPolicy, - t.Timeout, - t.Dynamic, - t.DynamicNodeProperties, - t.Capabilities, - ) -} - -// frame is the decoded representation of a frame -type Frame struct { - Type Type // AMQP/SASL - Channel uint16 // channel this frame is for - Body FrameBody // body of the frame -} - -// String implements the fmt.Stringer interface for type Frame. -func (f Frame) String() string { - return fmt.Sprintf("Frame{Type: %s, Channel: %d, Body: %s}", f.Type, f.Channel, f.Body) -} - -// frameBody adds some type safety to frame encoding -type FrameBody interface { - frameBody() -} - -/* - - - - - - - - - - - - - -*/ - -type PerformOpen struct { - ContainerID string // required - Hostname string - MaxFrameSize uint32 // default: 4294967295 - ChannelMax uint16 // default: 65535 - IdleTimeout time.Duration // from milliseconds - OutgoingLocales encoding.MultiSymbol - IncomingLocales encoding.MultiSymbol - OfferedCapabilities encoding.MultiSymbol - DesiredCapabilities encoding.MultiSymbol - Properties map[encoding.Symbol]any -} - -func (o *PerformOpen) frameBody() {} - -func (o *PerformOpen) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeOpen, []encoding.MarshalField{ - {Value: &o.ContainerID, Omit: false}, - {Value: &o.Hostname, Omit: o.Hostname == ""}, - {Value: &o.MaxFrameSize, Omit: o.MaxFrameSize == 4294967295}, - {Value: &o.ChannelMax, Omit: o.ChannelMax == 65535}, - {Value: (*encoding.Milliseconds)(&o.IdleTimeout), Omit: o.IdleTimeout == 0}, - {Value: &o.OutgoingLocales, Omit: len(o.OutgoingLocales) == 0}, - {Value: &o.IncomingLocales, Omit: len(o.IncomingLocales) == 0}, - {Value: &o.OfferedCapabilities, Omit: len(o.OfferedCapabilities) == 0}, - {Value: &o.DesiredCapabilities, Omit: len(o.DesiredCapabilities) == 0}, 
- {Value: o.Properties, Omit: len(o.Properties) == 0}, - }) -} - -func (o *PerformOpen) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeOpen, []encoding.UnmarshalField{ - {Field: &o.ContainerID, HandleNull: func() error { return errors.New("Open.ContainerID is required") }}, - {Field: &o.Hostname}, - {Field: &o.MaxFrameSize, HandleNull: func() error { o.MaxFrameSize = 4294967295; return nil }}, - {Field: &o.ChannelMax, HandleNull: func() error { o.ChannelMax = 65535; return nil }}, - {Field: (*encoding.Milliseconds)(&o.IdleTimeout)}, - {Field: &o.OutgoingLocales}, - {Field: &o.IncomingLocales}, - {Field: &o.OfferedCapabilities}, - {Field: &o.DesiredCapabilities}, - {Field: &o.Properties}, - }...) -} - -func (o *PerformOpen) String() string { - return fmt.Sprintf("Open{ContainerID : %s, Hostname: %s, MaxFrameSize: %d, "+ - "ChannelMax: %d, IdleTimeout: %v, "+ - "OutgoingLocales: %v, IncomingLocales: %v, "+ - "OfferedCapabilities: %v, DesiredCapabilities: %v, "+ - "Properties: %v}", - o.ContainerID, - o.Hostname, - o.MaxFrameSize, - o.ChannelMax, - o.IdleTimeout, - o.OutgoingLocales, - o.IncomingLocales, - o.OfferedCapabilities, - o.DesiredCapabilities, - o.Properties, - ) -} - -/* - - - - - - - - - - - - - -*/ -type PerformBegin struct { - // the remote channel for this session - // If a session is locally initiated, the remote-channel MUST NOT be set. - // When an endpoint responds to a remotely initiated session, the remote-channel - // MUST be set to the channel on which the remote session sent the begin. 
- RemoteChannel *uint16 - - // the transfer-id of the first transfer id the sender will send - NextOutgoingID uint32 // required, sequence number http://www.ietf.org/rfc/rfc1982.txt - - // the initial incoming-window of the sender - IncomingWindow uint32 // required - - // the initial outgoing-window of the sender - OutgoingWindow uint32 // required - - // the maximum handle value that can be used on the session - // The handle-max value is the highest handle value that can be - // used on the session. A peer MUST NOT attempt to attach a link - // using a handle value outside the range that its partner can handle. - // A peer that receives a handle outside the supported range MUST - // close the connection with the framing-error error-code. - HandleMax uint32 // default 4294967295 - - // the extension capabilities the sender supports - // http://www.amqp.org/specification/1.0/session-capabilities - OfferedCapabilities encoding.MultiSymbol - - // the extension capabilities the sender can use if the receiver supports them - // The sender MUST NOT attempt to use any capability other than those it - // has declared in desired-capabilities field. 
- DesiredCapabilities encoding.MultiSymbol - - // session properties - // http://www.amqp.org/specification/1.0/session-properties - Properties map[encoding.Symbol]any -} - -func (b *PerformBegin) frameBody() {} - -func (b *PerformBegin) String() string { - return fmt.Sprintf("Begin{RemoteChannel: %v, NextOutgoingID: %d, IncomingWindow: %d, "+ - "OutgoingWindow: %d, HandleMax: %d, OfferedCapabilities: %v, DesiredCapabilities: %v, "+ - "Properties: %v}", - formatUint16Ptr(b.RemoteChannel), - b.NextOutgoingID, - b.IncomingWindow, - b.OutgoingWindow, - b.HandleMax, - b.OfferedCapabilities, - b.DesiredCapabilities, - b.Properties, - ) -} - -func formatUint16Ptr(p *uint16) string { - if p == nil { - return "" - } - return strconv.FormatUint(uint64(*p), 10) -} - -func (b *PerformBegin) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeBegin, []encoding.MarshalField{ - {Value: b.RemoteChannel, Omit: b.RemoteChannel == nil}, - {Value: &b.NextOutgoingID, Omit: false}, - {Value: &b.IncomingWindow, Omit: false}, - {Value: &b.OutgoingWindow, Omit: false}, - {Value: &b.HandleMax, Omit: b.HandleMax == 4294967295}, - {Value: &b.OfferedCapabilities, Omit: len(b.OfferedCapabilities) == 0}, - {Value: &b.DesiredCapabilities, Omit: len(b.DesiredCapabilities) == 0}, - {Value: b.Properties, Omit: b.Properties == nil}, - }) -} - -func (b *PerformBegin) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeBegin, []encoding.UnmarshalField{ - {Field: &b.RemoteChannel}, - {Field: &b.NextOutgoingID, HandleNull: func() error { return errors.New("Begin.NextOutgoingID is required") }}, - {Field: &b.IncomingWindow, HandleNull: func() error { return errors.New("Begin.IncomingWindow is required") }}, - {Field: &b.OutgoingWindow, HandleNull: func() error { return errors.New("Begin.OutgoingWindow is required") }}, - {Field: &b.HandleMax, HandleNull: func() error { b.HandleMax = 4294967295; return nil }}, - {Field: 
&b.OfferedCapabilities}, - {Field: &b.DesiredCapabilities}, - {Field: &b.Properties}, - }...) -} - -/* - - - - - - - - - - - - - - - - - - - -*/ -type PerformAttach struct { - // the name of the link - // - // This name uniquely identifies the link from the container of the source - // to the container of the target node, e.g., if the container of the source - // node is A, and the container of the target node is B, the link MAY be - // globally identified by the (ordered) tuple (A,B,). - Name string // required - - // the handle for the link while attached - // - // The numeric handle assigned by the the peer as a shorthand to refer to the - // link in all performatives that reference the link until the it is detached. - // - // The handle MUST NOT be used for other open links. An attempt to attach using - // a handle which is already associated with a link MUST be responded to with - // an immediate close carrying a handle-in-use session-error. - // - // To make it easier to monitor AMQP link attach frames, it is RECOMMENDED that - // implementations always assign the lowest available handle to this field. - // - // The two endpoints MAY potentially use different handles to refer to the same link. - // Link handles MAY be reused once a link is closed for both send and receive. - Handle uint32 // required - - // role of the link endpoint - // - // The role being played by the peer, i.e., whether the peer is the sender or the - // receiver of messages on the link. - Role encoding.Role - - // settlement policy for the sender - // - // The delivery settlement policy for the sender. When set at the receiver this - // indicates the desired value for the settlement mode at the sender. When set - // at the sender this indicates the actual settlement mode in use. The sender - // SHOULD respect the receiver's desired settlement mode if the receiver initiates - // the attach exchange and the sender supports the desired mode. 
- // - // 0: unsettled - The sender will send all deliveries initially unsettled to the receiver. - // 1: settled - The sender will send all deliveries settled to the receiver. - // 2: mixed - The sender MAY send a mixture of settled and unsettled deliveries to the receiver. - SenderSettleMode *encoding.SenderSettleMode - - // the settlement policy of the receiver - // - // The delivery settlement policy for the receiver. When set at the sender this - // indicates the desired value for the settlement mode at the receiver. - // When set at the receiver this indicates the actual settlement mode in use. - // The receiver SHOULD respect the sender's desired settlement mode if the sender - // initiates the attach exchange and the receiver supports the desired mode. - // - // 0: first - The receiver will spontaneously settle all incoming transfers. - // 1: second - The receiver will only settle after sending the disposition to - // the sender and receiving a disposition indicating settlement of - // the delivery from the sender. - ReceiverSettleMode *encoding.ReceiverSettleMode - - // the source for messages - // - // If no source is specified on an outgoing link, then there is no source currently - // attached to the link. A link with no source will never produce outgoing messages. - Source *Source - - // the target for messages - // - // If no target is specified on an incoming link, then there is no target currently - // attached to the link. A link with no target will never permit incoming messages. - Target *Target - - // unsettled delivery state - // - // This is used to indicate any unsettled delivery states when a suspended link is - // resumed. The map is keyed by delivery-tag with values indicating the delivery state. - // The local and remote delivery states for a given delivery-tag MUST be compared to - // resolve any in-doubt deliveries. If necessary, deliveries MAY be resent, or resumed - // based on the outcome of this comparison. See subsection 2.6.13. 
- // - // If the local unsettled map is too large to be encoded within a frame of the agreed - // maximum frame size then the session MAY be ended with the frame-size-too-small error. - // The endpoint SHOULD make use of the ability to send an incomplete unsettled map - // (see below) to avoid sending an error. - // - // The unsettled map MUST NOT contain null valued keys. - // - // When reattaching (as opposed to resuming), the unsettled map MUST be null. - Unsettled encoding.Unsettled - - // If set to true this field indicates that the unsettled map provided is not complete. - // When the map is incomplete the recipient of the map cannot take the absence of a - // delivery tag from the map as evidence of settlement. On receipt of an incomplete - // unsettled map a sending endpoint MUST NOT send any new deliveries (i.e. deliveries - // where resume is not set to true) to its partner (and a receiving endpoint which sent - // an incomplete unsettled map MUST detach with an error on receiving a transfer which - // does not have the resume flag set to true). - // - // Note that if this flag is set to true then the endpoints MUST detach and reattach at - // least once in order to send new deliveries. This flag can be useful when there are - // too many entries in the unsettled map to fit within a single frame. An endpoint can - // attach, resume, settle, and detach until enough unsettled state has been cleared for - // an attach where this flag is set to false. - IncompleteUnsettled bool // default: false - - // the sender's initial value for delivery-count - // - // This MUST NOT be null if role is sender, and it is ignored if the role is receiver. - InitialDeliveryCount uint32 // sequence number - - // the maximum message size supported by the link endpoint - // - // This field indicates the maximum message size supported by the link endpoint. - // Any attempt to deliver a message larger than this results in a message-size-exceeded - // link-error. 
If this field is zero or unset, there is no maximum size imposed by the - // link endpoint. - MaxMessageSize uint64 - - // the extension capabilities the sender supports - // http://www.amqp.org/specification/1.0/link-capabilities - OfferedCapabilities encoding.MultiSymbol - - // the extension capabilities the sender can use if the receiver supports them - // - // The sender MUST NOT attempt to use any capability other than those it - // has declared in desired-capabilities field. - DesiredCapabilities encoding.MultiSymbol - - // link properties - // http://www.amqp.org/specification/1.0/link-properties - Properties map[encoding.Symbol]any -} - -func (a *PerformAttach) frameBody() {} - -func (a PerformAttach) String() string { - return fmt.Sprintf("Attach{Name: %s, Handle: %d, Role: %s, SenderSettleMode: %s, ReceiverSettleMode: %s, "+ - "Source: %v, Target: %v, Unsettled: %v, IncompleteUnsettled: %t, InitialDeliveryCount: %d, MaxMessageSize: %d, "+ - "OfferedCapabilities: %v, DesiredCapabilities: %v, Properties: %v}", - a.Name, - a.Handle, - a.Role, - a.SenderSettleMode, - a.ReceiverSettleMode, - a.Source, - a.Target, - a.Unsettled, - a.IncompleteUnsettled, - a.InitialDeliveryCount, - a.MaxMessageSize, - a.OfferedCapabilities, - a.DesiredCapabilities, - a.Properties, - ) -} - -func (a *PerformAttach) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeAttach, []encoding.MarshalField{ - {Value: &a.Name, Omit: false}, - {Value: &a.Handle, Omit: false}, - {Value: &a.Role, Omit: false}, - {Value: a.SenderSettleMode, Omit: a.SenderSettleMode == nil}, - {Value: a.ReceiverSettleMode, Omit: a.ReceiverSettleMode == nil}, - {Value: a.Source, Omit: a.Source == nil}, - {Value: a.Target, Omit: a.Target == nil}, - {Value: a.Unsettled, Omit: len(a.Unsettled) == 0}, - {Value: &a.IncompleteUnsettled, Omit: !a.IncompleteUnsettled}, - {Value: &a.InitialDeliveryCount, Omit: a.Role == encoding.RoleReceiver}, - {Value: &a.MaxMessageSize, Omit: 
a.MaxMessageSize == 0}, - {Value: &a.OfferedCapabilities, Omit: len(a.OfferedCapabilities) == 0}, - {Value: &a.DesiredCapabilities, Omit: len(a.DesiredCapabilities) == 0}, - {Value: a.Properties, Omit: len(a.Properties) == 0}, - }) -} - -func (a *PerformAttach) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeAttach, []encoding.UnmarshalField{ - {Field: &a.Name, HandleNull: func() error { return errors.New("Attach.Name is required") }}, - {Field: &a.Handle, HandleNull: func() error { return errors.New("Attach.Handle is required") }}, - {Field: &a.Role, HandleNull: func() error { return errors.New("Attach.Role is required") }}, - {Field: &a.SenderSettleMode}, - {Field: &a.ReceiverSettleMode}, - {Field: &a.Source}, - {Field: &a.Target}, - {Field: &a.Unsettled}, - {Field: &a.IncompleteUnsettled}, - {Field: &a.InitialDeliveryCount}, - {Field: &a.MaxMessageSize}, - {Field: &a.OfferedCapabilities}, - {Field: &a.DesiredCapabilities}, - {Field: &a.Properties}, - }...) -} - -/* - - - - - - - - - - - - - - - - -*/ -type PerformFlow struct { - // Identifies the expected transfer-id of the next incoming transfer frame. - // This value MUST be set if the peer has received the begin frame for the - // session, and MUST NOT be set if it has not. See subsection 2.5.6 for more details. - NextIncomingID *uint32 // sequence number - - // Defines the maximum number of incoming transfer frames that the endpoint - // can currently receive. See subsection 2.5.6 for more details. - IncomingWindow uint32 // required - - // The transfer-id that will be assigned to the next outgoing transfer frame. - // See subsection 2.5.6 for more details. - NextOutgoingID uint32 // sequence number - - // Defines the maximum number of outgoing transfer frames that the endpoint - // could potentially currently send, if it was not constrained by restrictions - // imposed by its peer's incoming-window. See subsection 2.5.6 for more details. 
- OutgoingWindow uint32 - - // If set, indicates that the flow frame carries flow state information for the local - // link endpoint associated with the given handle. If not set, the flow frame is - // carrying only information pertaining to the session endpoint. - // - // If set to a handle that is not currently associated with an attached link, - // the recipient MUST respond by ending the session with an unattached-handle - // session error. - Handle *uint32 - - // The delivery-count is initialized by the sender when a link endpoint is created, - // and is incremented whenever a message is sent. Only the sender MAY independently - // modify this field. The receiver's value is calculated based on the last known - // value from the sender and any subsequent messages received on the link. Note that, - // despite its name, the delivery-count is not a count but a sequence number - // initialized at an arbitrary point by the sender. - // - // When the handle field is not set, this field MUST NOT be set. - // - // When the handle identifies that the flow state is being sent from the sender link - // endpoint to receiver link endpoint this field MUST be set to the current - // delivery-count of the link endpoint. - // - // When the flow state is being sent from the receiver endpoint to the sender endpoint - // this field MUST be set to the last known value of the corresponding sending endpoint. - // In the event that the receiving link endpoint has not yet seen the initial attach - // frame from the sender this field MUST NOT be set. - DeliveryCount *uint32 // sequence number - - // the current maximum number of messages that can be received - // - // The current maximum number of messages that can be handled at the receiver endpoint - // of the link. Only the receiver endpoint can independently set this value. The sender - // endpoint sets this to the last known value seen from the receiver. - // See subsection 2.6.7 for more details. 
- // - // When the handle field is not set, this field MUST NOT be set. - LinkCredit *uint32 - - // the number of available messages - // - // The number of messages awaiting credit at the link sender endpoint. Only the sender - // can independently set this value. The receiver sets this to the last known value seen - // from the sender. See subsection 2.6.7 for more details. - // - // When the handle field is not set, this field MUST NOT be set. - Available *uint32 - - // indicates drain mode - // - // When flow state is sent from the sender to the receiver, this field contains the - // actual drain mode of the sender. When flow state is sent from the receiver to the - // sender, this field contains the desired drain mode of the receiver. - // See subsection 2.6.7 for more details. - // - // When the handle field is not set, this field MUST NOT be set. - Drain bool - - // request state from partner - // - // If set to true then the receiver SHOULD send its state at the earliest convenient - // opportunity. - // - // If set to true, and the handle field is not set, then the sender only requires - // session endpoint state to be echoed, however, the receiver MAY fulfil this requirement - // by sending a flow performative carrying link-specific state (since any such flow also - // carries session state). - // - // If a sender makes multiple requests for the same state before the receiver can reply, - // the receiver MAY send only one flow in return. - // - // Note that if a peer responds to echo requests with flows which themselves have the - // echo field set to true, an infinite loop could result if its partner adopts the same - // policy (therefore such a policy SHOULD be avoided). 
- Echo bool - - // link state properties - // http://www.amqp.org/specification/1.0/link-state-properties - Properties map[encoding.Symbol]any -} - -func (f *PerformFlow) frameBody() {} - -func (f *PerformFlow) String() string { - return fmt.Sprintf("Flow{NextIncomingID: %s, IncomingWindow: %d, NextOutgoingID: %d, OutgoingWindow: %d, "+ - "Handle: %s, DeliveryCount: %s, LinkCredit: %s, Available: %s, Drain: %t, Echo: %t, Properties: %+v}", - formatUint32Ptr(f.NextIncomingID), - f.IncomingWindow, - f.NextOutgoingID, - f.OutgoingWindow, - formatUint32Ptr(f.Handle), - formatUint32Ptr(f.DeliveryCount), - formatUint32Ptr(f.LinkCredit), - formatUint32Ptr(f.Available), - f.Drain, - f.Echo, - f.Properties, - ) -} - -func formatUint32Ptr(p *uint32) string { - if p == nil { - return "" - } - return strconv.FormatUint(uint64(*p), 10) -} - -func (f *PerformFlow) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeFlow, []encoding.MarshalField{ - {Value: f.NextIncomingID, Omit: f.NextIncomingID == nil}, - {Value: &f.IncomingWindow, Omit: false}, - {Value: &f.NextOutgoingID, Omit: false}, - {Value: &f.OutgoingWindow, Omit: false}, - {Value: f.Handle, Omit: f.Handle == nil}, - {Value: f.DeliveryCount, Omit: f.DeliveryCount == nil}, - {Value: f.LinkCredit, Omit: f.LinkCredit == nil}, - {Value: f.Available, Omit: f.Available == nil}, - {Value: &f.Drain, Omit: !f.Drain}, - {Value: &f.Echo, Omit: !f.Echo}, - {Value: f.Properties, Omit: len(f.Properties) == 0}, - }) -} - -func (f *PerformFlow) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeFlow, []encoding.UnmarshalField{ - {Field: &f.NextIncomingID}, - {Field: &f.IncomingWindow, HandleNull: func() error { return errors.New("Flow.IncomingWindow is required") }}, - {Field: &f.NextOutgoingID, HandleNull: func() error { return errors.New("Flow.NextOutgoingID is required") }}, - {Field: &f.OutgoingWindow, HandleNull: func() error { return 
errors.New("Flow.OutgoingWindow is required") }}, - {Field: &f.Handle}, - {Field: &f.DeliveryCount}, - {Field: &f.LinkCredit}, - {Field: &f.Available}, - {Field: &f.Drain}, - {Field: &f.Echo}, - {Field: &f.Properties}, - }...) -} - -/* - - - - - - - - - - - - - - - - -*/ -type PerformTransfer struct { - // Specifies the link on which the message is transferred. - Handle uint32 // required - - // The delivery-id MUST be supplied on the first transfer of a multi-transfer - // delivery. On continuation transfers the delivery-id MAY be omitted. It is - // an error if the delivery-id on a continuation transfer differs from the - // delivery-id on the first transfer of a delivery. - DeliveryID *uint32 // sequence number - - // Uniquely identifies the delivery attempt for a given message on this link. - // This field MUST be specified for the first transfer of a multi-transfer - // message and can only be omitted for continuation transfers. It is an error - // if the delivery-tag on a continuation transfer differs from the delivery-tag - // on the first transfer of a delivery. - DeliveryTag []byte // up to 32 bytes - - // This field MUST be specified for the first transfer of a multi-transfer message - // and can only be omitted for continuation transfers. It is an error if the - // message-format on a continuation transfer differs from the message-format on - // the first transfer of a delivery. - // - // The upper three octets of a message format code identify a particular message - // format. The lowest octet indicates the version of said message format. Any given - // version of a format is forwards compatible with all higher versions. - MessageFormat *uint32 - - // If not set on the first (or only) transfer for a (multi-transfer) delivery, - // then the settled flag MUST be interpreted as being false. 
For subsequent - // transfers in a multi-transfer delivery if the settled flag is left unset then - // it MUST be interpreted as true if and only if the value of the settled flag on - // any of the preceding transfers was true; if no preceding transfer was sent with - // settled being true then the value when unset MUST be taken as false. - // - // If the negotiated value for snd-settle-mode at attachment is settled, then this - // field MUST be true on at least one transfer frame for a delivery (i.e., the - // delivery MUST be settled at the sender at the point the delivery has been - // completely transferred). - // - // If the negotiated value for snd-settle-mode at attachment is unsettled, then this - // field MUST be false (or unset) on every transfer frame for a delivery (unless the - // delivery is aborted). - Settled bool - - // indicates that the message has more content - // - // Note that if both the more and aborted fields are set to true, the aborted flag - // takes precedence. That is, a receiver SHOULD ignore the value of the more field - // if the transfer is marked as aborted. A sender SHOULD NOT set the more flag to - // true if it also sets the aborted flag to true. - More bool - - // If first, this indicates that the receiver MUST settle the delivery once it has - // arrived without waiting for the sender to settle first. - // - // If second, this indicates that the receiver MUST NOT settle until sending its - // disposition to the sender and receiving a settled disposition from the sender. - // - // If not set, this value is defaulted to the value negotiated on link attach. - // - // If the negotiated link value is first, then it is illegal to set this field - // to second. - // - // If the message is being sent settled by the sender, the value of this field - // is ignored. - // - // The (implicit or explicit) value of this field does not form part of the - // transfer state, and is not retained if a link is suspended and subsequently resumed. 
- // - // 0: first - The receiver will spontaneously settle all incoming transfers. - // 1: second - The receiver will only settle after sending the disposition to - // the sender and receiving a disposition indicating settlement of - // the delivery from the sender. - ReceiverSettleMode *encoding.ReceiverSettleMode - - // the state of the delivery at the sender - // - // When set this informs the receiver of the state of the delivery at the sender. - // This is particularly useful when transfers of unsettled deliveries are resumed - // after resuming a link. Setting the state on the transfer can be thought of as - // being equivalent to sending a disposition immediately before the transfer - // performative, i.e., it is the state of the delivery (not the transfer) that - // existed at the point the frame was sent. - // - // Note that if the transfer performative (or an earlier disposition performative - // referring to the delivery) indicates that the delivery has attained a terminal - // state, then no future transfer or disposition sent by the sender can alter that - // terminal state. - State encoding.DeliveryState - - // indicates a resumed delivery - // - // If true, the resume flag indicates that the transfer is being used to reassociate - // an unsettled delivery from a dissociated link endpoint. See subsection 2.6.13 - // for more details. - // - // The receiver MUST ignore resumed deliveries that are not in its local unsettled map. - // The sender MUST NOT send resumed transfers for deliveries not in its local - // unsettled map. - // - // If a resumed delivery spans more than one transfer performative, then the resume - // flag MUST be set to true on the first transfer of the resumed delivery. For - // subsequent transfers for the same delivery the resume flag MAY be set to true, - // or MAY be omitted. 
- // - // In the case where the exchange of unsettled maps makes clear that all message - // data has been successfully transferred to the receiver, and that only the final - // state (and potentially settlement) at the sender needs to be conveyed, then a - // resumed delivery MAY carry no payload and instead act solely as a vehicle for - // carrying the terminal state of the delivery at the sender. - Resume bool - - // indicates that the message is aborted - // - // Aborted messages SHOULD be discarded by the recipient (any payload within the - // frame carrying the performative MUST be ignored). An aborted message is - // implicitly settled. - Aborted bool - - // batchable hint - // - // If true, then the issuer is hinting that there is no need for the peer to urgently - // communicate updated delivery state. This hint MAY be used to artificially increase - // the amount of batching an implementation uses when communicating delivery states, - // and thereby save bandwidth. - // - // If the message being delivered is too large to fit within a single frame, then the - // setting of batchable to true on any of the transfer performatives for the delivery - // is equivalent to setting batchable to true for all the transfer performatives for - // the delivery. - // - // The batchable value does not form part of the transfer state, and is not retained - // if a link is suspended and subsequently resumed. - Batchable bool - - Payload []byte - - // optional channel to indicate to sender that transfer has completed - // - // Settled=true: closed when the transferred on network. - // Settled=false: closed when the receiver has confirmed settlement. 
- Done chan encoding.DeliveryState -} - -func (t *PerformTransfer) frameBody() {} - -func (t PerformTransfer) String() string { - deliveryTag := "" - if t.DeliveryTag != nil { - deliveryTag = fmt.Sprintf("%X", t.DeliveryTag) - } - - return fmt.Sprintf("Transfer{Handle: %d, DeliveryID: %s, DeliveryTag: %s, MessageFormat: %s, "+ - "Settled: %t, More: %t, ReceiverSettleMode: %s, State: %v, Resume: %t, Aborted: %t, "+ - "Batchable: %t, Payload [size]: %d}", - t.Handle, - formatUint32Ptr(t.DeliveryID), - deliveryTag, - formatUint32Ptr(t.MessageFormat), - t.Settled, - t.More, - t.ReceiverSettleMode, - t.State, - t.Resume, - t.Aborted, - t.Batchable, - len(t.Payload), - ) -} - -func (t *PerformTransfer) Marshal(wr *buffer.Buffer) error { - err := encoding.MarshalComposite(wr, encoding.TypeCodeTransfer, []encoding.MarshalField{ - {Value: &t.Handle}, - {Value: t.DeliveryID, Omit: t.DeliveryID == nil}, - {Value: &t.DeliveryTag, Omit: len(t.DeliveryTag) == 0}, - {Value: t.MessageFormat, Omit: t.MessageFormat == nil}, - {Value: &t.Settled, Omit: !t.Settled}, - {Value: &t.More, Omit: !t.More}, - {Value: t.ReceiverSettleMode, Omit: t.ReceiverSettleMode == nil}, - {Value: t.State, Omit: t.State == nil}, - {Value: &t.Resume, Omit: !t.Resume}, - {Value: &t.Aborted, Omit: !t.Aborted}, - {Value: &t.Batchable, Omit: !t.Batchable}, - }) - if err != nil { - return err - } - - wr.Append(t.Payload) - return nil -} - -func (t *PerformTransfer) Unmarshal(r *buffer.Buffer) error { - err := encoding.UnmarshalComposite(r, encoding.TypeCodeTransfer, []encoding.UnmarshalField{ - {Field: &t.Handle, HandleNull: func() error { return errors.New("Transfer.Handle is required") }}, - {Field: &t.DeliveryID}, - {Field: &t.DeliveryTag}, - {Field: &t.MessageFormat}, - {Field: &t.Settled}, - {Field: &t.More}, - {Field: &t.ReceiverSettleMode}, - {Field: &t.State}, - {Field: &t.Resume}, - {Field: &t.Aborted}, - {Field: &t.Batchable}, - }...) 
- if err != nil { - return err - } - - t.Payload = append([]byte(nil), r.Bytes()...) - - return err -} - -/* - - - - - - - - - - - -*/ -type PerformDisposition struct { - // directionality of disposition - // - // The role identifies whether the disposition frame contains information about - // sending link endpoints or receiving link endpoints. - Role encoding.Role - - // lower bound of deliveries - // - // Identifies the lower bound of delivery-ids for the deliveries in this set. - First uint32 // required, sequence number - - // upper bound of deliveries - // - // Identifies the upper bound of delivery-ids for the deliveries in this set. - // If not set, this is taken to be the same as first. - Last *uint32 // sequence number - - // indicates deliveries are settled - // - // If true, indicates that the referenced deliveries are considered settled by - // the issuing endpoint. - Settled bool - - // indicates state of deliveries - // - // Communicates the state of all the deliveries referenced by this disposition. - State encoding.DeliveryState - - // batchable hint - // - // If true, then the issuer is hinting that there is no need for the peer to - // urgently communicate the impact of the updated delivery states. This hint - // MAY be used to artificially increase the amount of batching an implementation - // uses when communicating delivery states, and thereby save bandwidth. 
- Batchable bool -} - -func (d *PerformDisposition) frameBody() {} - -func (d PerformDisposition) String() string { - return fmt.Sprintf("Disposition{Role: %s, First: %d, Last: %s, Settled: %t, State: %v, Batchable: %t}", - d.Role, - d.First, - formatUint32Ptr(d.Last), - d.Settled, - d.State, - d.Batchable, - ) -} - -func (d *PerformDisposition) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeDisposition, []encoding.MarshalField{ - {Value: &d.Role, Omit: false}, - {Value: &d.First, Omit: false}, - {Value: d.Last, Omit: d.Last == nil}, - {Value: &d.Settled, Omit: !d.Settled}, - {Value: d.State, Omit: d.State == nil}, - {Value: &d.Batchable, Omit: !d.Batchable}, - }) -} - -func (d *PerformDisposition) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeDisposition, []encoding.UnmarshalField{ - {Field: &d.Role, HandleNull: func() error { return errors.New("Disposition.Role is required") }}, - {Field: &d.First, HandleNull: func() error { return errors.New("Disposition.Handle is required") }}, - {Field: &d.Last}, - {Field: &d.Settled}, - {Field: &d.State}, - {Field: &d.Batchable}, - }...) -} - -/* - - - - - - - - -*/ -type PerformDetach struct { - // the local handle of the link to be detached - Handle uint32 //required - - // if true then the sender has closed the link - Closed bool - - // error causing the detach - // - // If set, this field indicates that the link is being detached due to an error - // condition. The value of the field SHOULD contain details on the cause of the error. 
- Error *encoding.Error -} - -func (d *PerformDetach) frameBody() {} - -func (d PerformDetach) String() string { - return fmt.Sprintf("Detach{Handle: %d, Closed: %t, Error: %v}", - d.Handle, - d.Closed, - d.Error, - ) -} - -func (d *PerformDetach) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeDetach, []encoding.MarshalField{ - {Value: &d.Handle, Omit: false}, - {Value: &d.Closed, Omit: !d.Closed}, - {Value: d.Error, Omit: d.Error == nil}, - }) -} - -func (d *PerformDetach) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeDetach, []encoding.UnmarshalField{ - {Field: &d.Handle, HandleNull: func() error { return errors.New("Detach.Handle is required") }}, - {Field: &d.Closed}, - {Field: &d.Error}, - }...) -} - -/* - - - - - - -*/ -type PerformEnd struct { - // error causing the end - // - // If set, this field indicates that the session is being ended due to an error - // condition. The value of the field SHOULD contain details on the cause of the error. - Error *encoding.Error -} - -func (e *PerformEnd) frameBody() {} - -func (d PerformEnd) String() string { - return fmt.Sprintf("End{Error: %v}", d.Error) -} - -func (e *PerformEnd) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeEnd, []encoding.MarshalField{ - {Value: e.Error, Omit: e.Error == nil}, - }) -} - -func (e *PerformEnd) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeEnd, - encoding.UnmarshalField{Field: &e.Error}, - ) -} - -/* - - - - - - -*/ -type PerformClose struct { - // error causing the close - // - // If set, this field indicates that the session is being closed due to an error - // condition. The value of the field SHOULD contain details on the cause of the error. 
- Error *encoding.Error -} - -func (c *PerformClose) frameBody() {} - -func (c *PerformClose) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeClose, []encoding.MarshalField{ - {Value: c.Error, Omit: c.Error == nil}, - }) -} - -func (c *PerformClose) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeClose, - encoding.UnmarshalField{Field: &c.Error}, - ) -} - -func (c *PerformClose) String() string { - return fmt.Sprintf("Close{Error: %s}", c.Error) -} - -/* - - - - - - -*/ - -type SASLInit struct { - Mechanism encoding.Symbol - InitialResponse []byte - Hostname string -} - -func (si *SASLInit) frameBody() {} - -func (si *SASLInit) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLInit, []encoding.MarshalField{ - {Value: &si.Mechanism, Omit: false}, - {Value: &si.InitialResponse, Omit: false}, - {Value: &si.Hostname, Omit: len(si.Hostname) == 0}, - }) -} - -func (si *SASLInit) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLInit, []encoding.UnmarshalField{ - {Field: &si.Mechanism, HandleNull: func() error { return errors.New("saslInit.Mechanism is required") }}, - {Field: &si.InitialResponse}, - {Field: &si.Hostname}, - }...) -} - -func (si *SASLInit) String() string { - // Elide the InitialResponse as it may contain a plain text secret. 
- return fmt.Sprintf("SaslInit{Mechanism : %s, InitialResponse: ********, Hostname: %s}", - si.Mechanism, - si.Hostname, - ) -} - -/* - - - - -*/ - -type SASLMechanisms struct { - Mechanisms encoding.MultiSymbol -} - -func (sm *SASLMechanisms) frameBody() {} - -func (sm *SASLMechanisms) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLMechanism, []encoding.MarshalField{ - {Value: &sm.Mechanisms, Omit: false}, - }) -} - -func (sm *SASLMechanisms) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLMechanism, - encoding.UnmarshalField{Field: &sm.Mechanisms, HandleNull: func() error { return errors.New("saslMechanisms.Mechanisms is required") }}, - ) -} - -func (sm *SASLMechanisms) String() string { - return fmt.Sprintf("SaslMechanisms{Mechanisms : %v}", - sm.Mechanisms, - ) -} - -/* - - - - -*/ - -type SASLChallenge struct { - Challenge []byte -} - -func (sc *SASLChallenge) String() string { - return "Challenge{Challenge: ********}" -} - -func (sc *SASLChallenge) frameBody() {} - -func (sc *SASLChallenge) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLChallenge, []encoding.MarshalField{ - {Value: &sc.Challenge, Omit: false}, - }) -} - -func (sc *SASLChallenge) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLChallenge, []encoding.UnmarshalField{ - {Field: &sc.Challenge, HandleNull: func() error { return errors.New("saslChallenge.Challenge is required") }}, - }...) 
-} - -/* - - - - -*/ - -type SASLResponse struct { - Response []byte -} - -func (sr *SASLResponse) String() string { - return "Response{Response: ********}" -} - -func (sr *SASLResponse) frameBody() {} - -func (sr *SASLResponse) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLResponse, []encoding.MarshalField{ - {Value: &sr.Response, Omit: false}, - }) -} - -func (sr *SASLResponse) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLResponse, []encoding.UnmarshalField{ - {Field: &sr.Response, HandleNull: func() error { return errors.New("saslResponse.Response is required") }}, - }...) -} - -/* - - - - - -*/ - -type SASLOutcome struct { - Code encoding.SASLCode - AdditionalData []byte -} - -func (so *SASLOutcome) frameBody() {} - -func (so *SASLOutcome) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLOutcome, []encoding.MarshalField{ - {Value: &so.Code, Omit: false}, - {Value: &so.AdditionalData, Omit: len(so.AdditionalData) == 0}, - }) -} - -func (so *SASLOutcome) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLOutcome, []encoding.UnmarshalField{ - {Field: &so.Code, HandleNull: func() error { return errors.New("saslOutcome.AdditionalData is required") }}, - {Field: &so.AdditionalData}, - }...) 
-} - -func (so *SASLOutcome) String() string { - return fmt.Sprintf("SaslOutcome{Code : %v, AdditionalData: %v}", - so.Code, - so.AdditionalData, - ) -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames/parsing.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames/parsing.go deleted file mode 100644 index 0e03a52a23e3..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames/parsing.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package frames - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" -) - -const HeaderSize = 8 - -// Frame structure: -// -// header (8 bytes) -// 0-3: SIZE (total size, at least 8 bytes for header, uint32) -// 4: DOFF (data offset,at least 2, count of 4 bytes words, uint8) -// 5: TYPE (frame type) -// 0x0: AMQP -// 0x1: SASL -// 6-7: type dependent (channel for AMQP) -// extended header (opt) -// body (opt) - -// Header in a structure appropriate for use with binary.Read() -type Header struct { - // size: an unsigned 32-bit integer that MUST contain the total frame size of the frame header, - // extended header, and frame body. The frame is malformed if the size is less than the size of - // the frame header (8 bytes). - Size uint32 - // doff: gives the position of the body within the frame. The value of the data offset is an - // unsigned, 8-bit integer specifying a count of 4-byte words. Due to the mandatory 8-byte - // frame header, the frame is malformed if the value is less than 2. - DataOffset uint8 - FrameType uint8 - Channel uint16 -} - -// ParseHeader reads the header from r and returns the result. -// -// No validation is done. 
-func ParseHeader(r *buffer.Buffer) (Header, error) { - buf, ok := r.Next(8) - if !ok { - return Header{}, errors.New("invalid frameHeader") - } - _ = buf[7] - - fh := Header{ - Size: binary.BigEndian.Uint32(buf[0:4]), - DataOffset: buf[4], - FrameType: buf[5], - Channel: binary.BigEndian.Uint16(buf[6:8]), - } - - if fh.Size < HeaderSize { - return fh, fmt.Errorf("received frame header with invalid size %d", fh.Size) - } - - if fh.DataOffset < 2 { - return fh, fmt.Errorf("received frame header with invalid data offset %d", fh.DataOffset) - } - - return fh, nil -} - -// ParseBody reads and unmarshals an AMQP frame. -func ParseBody(r *buffer.Buffer) (FrameBody, error) { - payload := r.Bytes() - - if r.Len() < 3 || payload[0] != 0 || encoding.AMQPType(payload[1]) != encoding.TypeCodeSmallUlong { - return nil, errors.New("invalid frame body header") - } - - switch pType := encoding.AMQPType(payload[2]); pType { - case encoding.TypeCodeOpen: - t := new(PerformOpen) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeBegin: - t := new(PerformBegin) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeAttach: - t := new(PerformAttach) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeFlow: - t := new(PerformFlow) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeTransfer: - t := new(PerformTransfer) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeDisposition: - t := new(PerformDisposition) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeDetach: - t := new(PerformDetach) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeEnd: - t := new(PerformEnd) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeClose: - t := new(PerformClose) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeSASLMechanism: - t := new(SASLMechanisms) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeSASLChallenge: - t := new(SASLChallenge) - err := t.Unmarshal(r) - 
return t, err - case encoding.TypeCodeSASLOutcome: - t := new(SASLOutcome) - err := t.Unmarshal(r) - return t, err - default: - return nil, fmt.Errorf("unknown performative type %02x", pType) - } -} - -// Write encodes fr into buf. -// split out from conn.WriteFrame for testing purposes. -func Write(buf *buffer.Buffer, fr Frame) error { - // write header - buf.Append([]byte{ - 0, 0, 0, 0, // size, overwrite later - 2, // doff, see frameHeader.DataOffset comment - uint8(fr.Type), // frame type - }) - buf.AppendUint16(fr.Channel) // channel - - // write AMQP frame body - err := encoding.Marshal(buf, fr.Body) - if err != nil { - return err - } - - // validate size - if uint(buf.Len()) > math.MaxUint32 { - return errors.New("frame too large") - } - - // retrieve raw bytes - bufBytes := buf.Bytes() - - // write correct size - binary.BigEndian.PutUint32(bufBytes, uint32(len(bufBytes))) - return nil -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/queue/queue.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/queue/queue.go deleted file mode 100644 index 45d6f5af9daf..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/queue/queue.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (c) Microsoft Corporation - -package queue - -import ( - "container/ring" -) - -// Holder provides synchronized access to a *Queue[T]. -type Holder[T any] struct { - // these channels work in tandem to provide exclusive access to the underlying *Queue[T]. - // each channel is created with a buffer size of one. - // empty behaves like a mutex when there's one or more messages in the queue. - // populated is like a semaphore when the queue is empty. - // the *Queue[T] is only ever in one channel. which channel depends on if it contains any items. - // the initial state is for empty to contain an empty queue. - empty chan *Queue[T] - populated chan *Queue[T] -} - -// NewHolder creates a new Holder[T] that contains the provided *Queue[T]. 
-func NewHolder[T any](q *Queue[T]) *Holder[T] { - h := &Holder[T]{ - empty: make(chan *Queue[T], 1), - populated: make(chan *Queue[T], 1), - } - h.Release(q) - return h -} - -// Acquire attempts to acquire the *Queue[T]. If the *Queue[T] has already been acquired the call blocks. -// When the *Queue[T] is no longer required, you MUST call Release() to relinquish acquisition. -func (h *Holder[T]) Acquire() *Queue[T] { - // the queue will be in only one of the channels, it doesn't matter which one - var q *Queue[T] - select { - case q = <-h.empty: - // empty queue - case q = <-h.populated: - // populated queue - } - return q -} - -// Wait returns a channel that's signaled when the *Queue[T] contains at least one item. -// When the *Queue[T] is no longer required, you MUST call Release() to relinquish acquisition. -func (h *Holder[T]) Wait() <-chan *Queue[T] { - return h.populated -} - -// Release returns the *Queue[T] back to the Holder[T]. -// Once the *Queue[T] has been released, it is no longer safe to call its methods. -func (h *Holder[T]) Release(q *Queue[T]) { - if q.Len() == 0 { - h.empty <- q - } else { - h.populated <- q - } -} - -// Len returns the length of the *Queue[T]. -func (h *Holder[T]) Len() int { - msgLen := 0 - select { - case q := <-h.empty: - h.empty <- q - case q := <-h.populated: - msgLen = q.Len() - h.populated <- q - } - return msgLen -} - -// Queue[T] is a segmented FIFO queue of Ts. -type Queue[T any] struct { - head *ring.Ring - tail *ring.Ring - size int -} - -// New creates a new instance of Queue[T]. -// - size is the size of each Queue segment -func New[T any](size int) *Queue[T] { - r := &ring.Ring{ - Value: &segment[T]{ - items: make([]*T, size), - }, - } - return &Queue[T]{ - head: r, - tail: r, - } -} - -// Enqueue adds the specified item to the end of the queue. -// If the current segment is full, a new segment is created. 
-func (q *Queue[T]) Enqueue(item T) { - for { - r := q.tail - seg := r.Value.(*segment[T]) - - if seg.tail < len(seg.items) { - seg.items[seg.tail] = &item - seg.tail++ - q.size++ - return - } - - // segment is full, can we advance? - if next := r.Next(); next != q.head { - q.tail = next - continue - } - - // no, add a new ring - r.Link(&ring.Ring{ - Value: &segment[T]{ - items: make([]*T, len(seg.items)), - }, - }) - - q.tail = r.Next() - } -} - -// Dequeue removes and returns the item from the front of the queue. -func (q *Queue[T]) Dequeue() *T { - r := q.head - seg := r.Value.(*segment[T]) - - if seg.tail == 0 { - // queue is empty - return nil - } - - // remove first item - item := seg.items[seg.head] - seg.items[seg.head] = nil - seg.head++ - q.size-- - - if seg.head == seg.tail { - // segment is now empty, reset indices - seg.head, seg.tail = 0, 0 - - // if we're not at the last ring, advance head to the next one - if q.head != q.tail { - q.head = r.Next() - } - } - - return item -} - -// Len returns the total count of enqueued items. -func (q *Queue[T]) Len() int { - return q.size -} - -type segment[T any] struct { - items []*T - head int - tail int -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/internal/shared/shared.go b/sdk/messaging/azeventhubs/internal/go-amqp/internal/shared/shared.go deleted file mode 100644 index 867c1e932bf5..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/internal/shared/shared.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) Microsoft Corporation - -package shared - -import ( - "encoding/base64" - "math/rand" - "sync" - "time" -) - -// lockedRand provides a rand source that is safe for concurrent use. -type lockedRand struct { - mu sync.Mutex - src *rand.Rand -} - -func (r *lockedRand) Read(p []byte) (int, error) { - r.mu.Lock() - defer r.mu.Unlock() - return r.src.Read(p) -} - -// package scoped rand source to avoid any issues with seeding -// of the global source. 
-var pkgRand = &lockedRand{ - src: rand.New(rand.NewSource(time.Now().UnixNano())), -} - -// RandString returns a base64 encoded string of n bytes. -func RandString(n int) string { - b := make([]byte, n) - // from math/rand, cannot fail - _, _ = pkgRand.Read(b) - return base64.RawURLEncoding.EncodeToString(b) -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/link.go b/sdk/messaging/azeventhubs/internal/go-amqp/link.go deleted file mode 100644 index 27a257c7ecb3..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/link.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/queue" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/shared" -) - -// linkKey uniquely identifies a link on a connection by name and direction. -// -// A link can be identified uniquely by the ordered tuple -// -// (source-container-id, target-container-id, name) -// -// On a single connection the container ID pairs can be abbreviated -// to a boolean flag indicating the direction of the link. 
-type linkKey struct { - name string - role encoding.Role // Local role: sender/receiver -} - -// link contains the common state and methods for sending and receiving links -type link struct { - key linkKey // Name and direction - handle uint32 // our handle - remoteHandle uint32 // remote's handle - dynamicAddr bool // request a dynamic link address from the server - - // frames destined for this link are added to this queue by Session.muxFrameToLink - rxQ *queue.Holder[frames.FrameBody] - - // used for gracefully closing link - close chan struct{} // signals a link's mux to shut down; DO NOT use this to check if a link has terminated, use done instead - closeOnce *sync.Once // closeOnce protects close from being closed multiple times - - done chan struct{} // closed when the link has terminated (mux exited); DO NOT wait on this from within a link's mux() as it will never trigger! - doneErr error // contains the mux error state; ONLY written to by the mux and MUST only be read from after done is closed! - closeErr error // contains the error state returned from closeLink(); ONLY closeLink() reads/writes this! - - session *Session // parent session - source *frames.Source // used for Receiver links - target *frames.Target // used for Sender links - properties map[encoding.Symbol]any // additional properties sent upon link attach - - // "The delivery-count is initialized by the sender when a link endpoint is created, - // and is incremented whenever a message is sent. Only the sender MAY independently - // modify this field. The receiver's value is calculated based on the last known - // value from the sender and any subsequent messages received on the link. Note that, - // despite its name, the delivery-count is not a count but a sequence number - // initialized at an arbitrary point by the sender." - deliveryCount uint32 - - // The current maximum number of messages that can be handled at the receiver endpoint of the link. 
Only the receiver endpoint - // can independently set this value. The sender endpoint sets this to the last known value seen from the receiver. - linkCredit uint32 - - senderSettleMode *SenderSettleMode - receiverSettleMode *ReceiverSettleMode - maxMessageSize uint64 - - closeInProgress bool // indicates that the detach performative has been sent -} - -func newLink(s *Session, r encoding.Role) link { - l := link{ - key: linkKey{shared.RandString(40), r}, - session: s, - close: make(chan struct{}), - closeOnce: &sync.Once{}, - done: make(chan struct{}), - } - - // set the segment size relative to respective window - var segmentSize int - if r == encoding.RoleReceiver { - segmentSize = int(s.incomingWindow) - } else { - segmentSize = int(s.outgoingWindow) - } - - l.rxQ = queue.NewHolder(queue.New[frames.FrameBody](segmentSize)) - return l -} - -// waitForFrame waits for an incoming frame to be queued. -// it returns the next frame from the queue, or an error. -// the error is either from the context or session.doneErr. -// not meant for consumption outside of link.go. -func (l *link) waitForFrame(ctx context.Context) (frames.FrameBody, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-l.session.done: - // session has terminated, no need to deallocate in this case - return nil, l.session.doneErr - case q := <-l.rxQ.Wait(): - // frame received - fr := q.Dequeue() - l.rxQ.Release(q) - return *fr, nil - } -} - -// attach sends the Attach performative to establish the link with its parent session. -// this is automatically called by the new*Link constructors. 
-func (l *link) attach(ctx context.Context, beforeAttach func(*frames.PerformAttach), afterAttach func(*frames.PerformAttach)) error { - if err := l.session.freeAbandonedLinks(ctx); err != nil { - return err - } - - // once the abandoned links have been cleaned up we can create our link - if err := l.session.allocateHandle(ctx, l); err != nil { - return err - } - - attach := &frames.PerformAttach{ - Name: l.key.name, - Handle: l.handle, - ReceiverSettleMode: l.receiverSettleMode, - SenderSettleMode: l.senderSettleMode, - MaxMessageSize: l.maxMessageSize, - Source: l.source, - Target: l.target, - Properties: l.properties, - } - - // link-specific configuration of the attach frame - beforeAttach(attach) - - if err := l.txFrameAndWait(ctx, attach); err != nil { - return err - } - - // wait for response - fr, err := l.waitForFrame(ctx) - if err != nil { - l.session.abandonLink(l) - return err - } - - resp, ok := fr.(*frames.PerformAttach) - if !ok { - debug.Log(1, "RX (link %p): unexpected attach response frame %T", l, fr) - if err := l.session.conn.Close(); err != nil { - return err - } - return &ConnError{inner: fmt.Errorf("unexpected attach response: %#v", fr)} - } - - // If the remote encounters an error during the attach it returns an Attach - // with no Source or Target. The remote then sends a Detach with an error. - // - // Note that if the application chooses not to create a terminus, the session - // endpoint will still create a link endpoint and issue an attach indicating - // that the link endpoint has no associated local terminus. In this case, the - // session endpoint MUST immediately detach the newly created link endpoint. - // - // http://docs.oasis-open.org/amqp/core/v1.0/csprd01/amqp-core-transport-v1.0-csprd01.html#doc-idp386144 - if resp.Source == nil && resp.Target == nil { - // wait for detach - fr, err := l.waitForFrame(ctx) - if err != nil { - // we timed out waiting for the peer to close the link, this really isn't an abandoned link. 
- // however, we still need to send the detach performative to ack the peer. - l.session.abandonLink(l) - return err - } - - detach, ok := fr.(*frames.PerformDetach) - if !ok { - if err := l.session.conn.Close(); err != nil { - return err - } - return &ConnError{inner: fmt.Errorf("unexpected frame while waiting for detach: %#v", fr)} - } - - // send return detach - fr = &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - } - if err := l.txFrameAndWait(ctx, fr); err != nil { - return err - } - - if detach.Error == nil { - return fmt.Errorf("received detach with no error specified") - } - return detach.Error - } - - if l.maxMessageSize == 0 || resp.MaxMessageSize < l.maxMessageSize { - l.maxMessageSize = resp.MaxMessageSize - } - - // link-specific configuration post attach - afterAttach(resp) - - if err := l.setSettleModes(resp); err != nil { - // close the link as there's a mismatch on requested/supported settlement modes - dr := &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - } - if err := l.txFrameAndWait(ctx, dr); err != nil { - return err - } - return err - } - - return nil -} - -// setSettleModes sets the settlement modes based on the resp frames.PerformAttach. -// -// If a settlement mode has been explicitly set locally and it was not honored by the -// server an error is returned. 
-func (l *link) setSettleModes(resp *frames.PerformAttach) error { - var ( - localRecvSettle = receiverSettleModeValue(l.receiverSettleMode) - respRecvSettle = receiverSettleModeValue(resp.ReceiverSettleMode) - ) - if l.receiverSettleMode != nil && localRecvSettle != respRecvSettle { - return fmt.Errorf("amqp: receiver settlement mode %q requested, received %q from server", l.receiverSettleMode, &respRecvSettle) - } - l.receiverSettleMode = &respRecvSettle - - var ( - localSendSettle = senderSettleModeValue(l.senderSettleMode) - respSendSettle = senderSettleModeValue(resp.SenderSettleMode) - ) - if l.senderSettleMode != nil && localSendSettle != respSendSettle { - return fmt.Errorf("amqp: sender settlement mode %q requested, received %q from server", l.senderSettleMode, &respSendSettle) - } - l.senderSettleMode = &respSendSettle - - return nil -} - -// muxHandleFrame processes fr based on type. -func (l *link) muxHandleFrame(fr frames.FrameBody) error { - switch fr := fr.(type) { - case *frames.PerformDetach: - if !fr.Closed { - l.closeWithError(ErrCondNotImplemented, fmt.Sprintf("non-closing detach not supported: %+v", fr)) - return nil - } - - // there are two possibilities: - // - this is the ack to a client-side Close() - // - the peer is closing the link so we must ack - - if l.closeInProgress { - // if the client-side close was initiated due to an error (l.closeWithError) - // then l.doneErr will already be set. in this case, return that error instead - // of an empty LinkError which indicates a clean client-side close. 
- if l.doneErr != nil { - return l.doneErr - } - return &LinkError{} - } - - dr := &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - } - l.txFrame(context.Background(), dr, nil) - return &LinkError{RemoteErr: fr.Error} - - default: - debug.Log(1, "RX (link %p): unexpected frame: %s", l, fr) - l.closeWithError(ErrCondInternalError, fmt.Sprintf("link received unexpected frame %T", fr)) - return nil - } -} - -// Close closes the Sender and AMQP link. -func (l *link) closeLink(ctx context.Context) error { - var ctxErr error - l.closeOnce.Do(func() { - close(l.close) - - // once the mux has received the ack'ing detach performative, the mux will - // exit which deletes the link and closes l.done. - select { - case <-l.done: - l.closeErr = l.doneErr - case <-ctx.Done(): - // notify the caller that the close timed out/was cancelled. - // the mux will remain running and once the ack is received it will terminate. - ctxErr = ctx.Err() - - // record that the close timed out/was cancelled. - // subsequent calls to closeLink() will return this - debug.Log(1, "TX (link %p) closing %s: %v", l, l.key.name, ctxErr) - l.closeErr = &LinkError{inner: ctxErr} - } - }) - - if ctxErr != nil { - return ctxErr - } - - var linkErr *LinkError - if errors.As(l.closeErr, &linkErr) && linkErr.RemoteErr == nil && linkErr.inner == nil { - // an empty LinkError means the link was cleanly closed by the caller - return nil - } - return l.closeErr -} - -// closeWithError initiates closing the link with the specified AMQP error. -// the mux must continue to run until the ack'ing detach is received. 
-// l.doneErr is populated with a &LinkError{} containing an inner error constructed from the specified values -// - cnd is the AMQP error condition -// - desc is the error description -func (l *link) closeWithError(cnd ErrCond, desc string) { - amqpErr := &Error{Condition: cnd, Description: desc} - if l.closeInProgress { - debug.Log(3, "TX (link %p) close error already pending, discarding %v", l, amqpErr) - return - } - - dr := &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - Error: amqpErr, - } - l.closeInProgress = true - l.doneErr = &LinkError{inner: fmt.Errorf("%s: %s", cnd, desc)} - l.txFrame(context.Background(), dr, nil) -} - -// txFrame sends the specified frame via the link's session. -// you MUST call this instead of session.txFrame() to ensure -// that frames are not sent during session shutdown. -func (l *link) txFrame(ctx context.Context, fr frames.FrameBody, sent chan error) { - // NOTE: there is no need to select on l.done as this is either - // called from a link's mux or before the mux has even started. - select { - case <-l.session.done: - if sent != nil { - sent <- l.session.doneErr - } - case <-l.session.endSent: - // we swallow this to prevent the link's mux from terminating. - // l.session.done will soon close so this is temporary. - return - case l.session.tx <- frameBodyEnvelope{Ctx: ctx, FrameBody: fr, Sent: sent}: - debug.Log(2, "TX (link %p): mux frame to Session (%p): %s", l, l.session, fr) - } -} - -// txFrame sends the specified frame via the link's session. -// you MUST call this instead of session.txFrame() to ensure -// that frames are not sent during session shutdown. -func (l *link) txFrameAndWait(ctx context.Context, fr frames.FrameBody) error { - // NOTE: there is no need to select on l.done as this is either - // called from a link's mux or before the mux has even started. 
- sent := make(chan error, 1) - select { - case <-l.session.done: - return l.session.doneErr - case <-l.session.endSent: - // we swallow this to prevent the link's mux from terminating. - // l.session.done will soon close so this is temporary. - return nil - case l.session.tx <- frameBodyEnvelope{Ctx: ctx, FrameBody: fr, Sent: sent}: - debug.Log(2, "TX (link %p): mux frame to Session (%p): %s", l, l.session, fr) - } - - select { - case err := <-sent: - return err - case <-l.done: - return l.doneErr - case <-l.session.done: - return l.session.doneErr - } -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/link_options.go b/sdk/messaging/azeventhubs/internal/go-amqp/link_options.go deleted file mode 100644 index c4ba797007db..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/link_options.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" -) - -type SenderOptions struct { - // Capabilities is the list of extension capabilities the sender supports. - Capabilities []string - - // Durability indicates what state of the sender will be retained durably. - // - // Default: DurabilityNone. - Durability Durability - - // DynamicAddress indicates a dynamic address is to be used. - // Any specified address will be ignored. - // - // Default: false. - DynamicAddress bool - - // ExpiryPolicy determines when the expiry timer of the sender starts counting - // down from the timeout value. If the link is subsequently re-attached before - // the timeout is reached, the count down is aborted. - // - // Default: ExpirySessionEnd. - ExpiryPolicy ExpiryPolicy - - // ExpiryTimeout is the duration in seconds that the sender will be retained. - // - // Default: 0. - ExpiryTimeout uint32 - - // Name sets the name of the link. 
- // - // Link names must be unique per-connection and direction. - // - // Default: randomly generated. - Name string - - // Properties sets an entry in the link properties map sent to the server. - Properties map[string]any - - // RequestedReceiverSettleMode sets the requested receiver settlement mode. - // - // If a settlement mode is explicitly set and the server does not - // honor it an error will be returned during link attachment. - // - // Default: Accept the settlement mode set by the server, commonly ModeFirst. - RequestedReceiverSettleMode *ReceiverSettleMode - - // SettlementMode sets the settlement mode in use by this sender. - // - // Default: ModeMixed. - SettlementMode *SenderSettleMode - - // SourceAddress specifies the source address for this sender. - SourceAddress string - - // TargetCapabilities is the list of extension capabilities the sender desires. - TargetCapabilities []string - - // TargetDurability indicates what state of the peer will be retained durably. - // - // Default: DurabilityNone. - TargetDurability Durability - - // TargetExpiryPolicy determines when the expiry timer of the peer starts counting - // down from the timeout value. If the link is subsequently re-attached before - // the timeout is reached, the count down is aborted. - // - // Default: ExpirySessionEnd. - TargetExpiryPolicy ExpiryPolicy - - // TargetExpiryTimeout is the duration in seconds that the peer will be retained. - // - // Default: 0. - TargetExpiryTimeout uint32 -} - -type ReceiverOptions struct { - // Capabilities is the list of extension capabilities the receiver supports. - Capabilities []string - - // Credit specifies the maximum number of unacknowledged messages - // the sender can transmit. Once this limit is reached, no more messages - // will arrive until messages are acknowledged and settled. - // - // As messages are settled, any available credit will automatically be issued. 
- // - // Setting this to -1 requires manual management of link credit. - // Credits can be added with IssueCredit(), and links can also be - // drained with DrainCredit(). - // This should only be enabled when complete control of the link's - // flow control is required. - // - // Default: 1. - Credit int32 - - // Durability indicates what state of the receiver will be retained durably. - // - // Default: DurabilityNone. - Durability Durability - - // DynamicAddress indicates a dynamic address is to be used. - // Any specified address will be ignored. - // - // Default: false. - DynamicAddress bool - - // ExpiryPolicy determines when the expiry timer of the sender starts counting - // down from the timeout value. If the link is subsequently re-attached before - // the timeout is reached, the count down is aborted. - // - // Default: ExpirySessionEnd. - ExpiryPolicy ExpiryPolicy - - // ExpiryTimeout is the duration in seconds that the sender will be retained. - // - // Default: 0. - ExpiryTimeout uint32 - - // Filters contains the desired filters for this receiver. - // If the peer cannot fulfill the filters the link will be detached. - Filters []LinkFilter - - // MaxMessageSize sets the maximum message size that can - // be received on the link. - // - // A size of zero indicates no limit. - // - // Default: 0. - MaxMessageSize uint64 - - // Name sets the name of the link. - // - // Link names must be unique per-connection and direction. - // - // Default: randomly generated. - Name string - - // Properties sets an entry in the link properties map sent to the server. - Properties map[string]any - - // RequestedSenderSettleMode sets the requested sender settlement mode. - // - // If a settlement mode is explicitly set and the server does not - // honor it an error will be returned during link attachment. - // - // Default: Accept the settlement mode set by the server, commonly ModeMixed. 
- RequestedSenderSettleMode *SenderSettleMode - - // SettlementMode sets the settlement mode in use by this receiver. - // - // Default: ModeFirst. - SettlementMode *ReceiverSettleMode - - // TargetAddress specifies the target address for this receiver. - TargetAddress string - - // SourceCapabilities is the list of extension capabilities the receiver desires. - SourceCapabilities []string - - // SourceDurability indicates what state of the peer will be retained durably. - // - // Default: DurabilityNone. - SourceDurability Durability - - // SourceExpiryPolicy determines when the expiry timer of the peer starts counting - // down from the timeout value. If the link is subsequently re-attached before - // the timeout is reached, the count down is aborted. - // - // Default: ExpirySessionEnd. - SourceExpiryPolicy ExpiryPolicy - - // SourceExpiryTimeout is the duration in seconds that the peer will be retained. - // - // Default: 0. - SourceExpiryTimeout uint32 -} - -// LinkFilter is an advanced API for setting non-standard source filters. -// Please file an issue or open a PR if a standard filter is missing from this -// library. -// -// The name is the key for the filter map. It will be encoded as an AMQP symbol type. -// -// The code is the descriptor of the described type value. The domain-id and descriptor-id -// should be concatenated together. If 0 is passed as the code, the name will be used as -// the descriptor. -// -// The value is the value of the descriped types. Acceptable types for value are specific -// to the filter. -// -// Example: -// -// The standard selector-filter is defined as: -// -// -// -// In this case the name is "apache.org:selector-filter:string" and the code is -// 0x0000468C00000004. 
-// -// LinkSourceFilter("apache.org:selector-filter:string", 0x0000468C00000004, exampleValue) -// -// References: -// -// http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#type-filter-set -// http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-types-v1.0-os.html#section-descriptor-values -type LinkFilter func(encoding.Filter) - -// NewLinkFilter creates a new LinkFilter with the specified values. -// Any preexisting link filter with the same name will be updated with the new code and value. -func NewLinkFilter(name string, code uint64, value any) LinkFilter { - return func(f encoding.Filter) { - var descriptor any - if code != 0 { - descriptor = code - } else { - descriptor = encoding.Symbol(name) - } - f[encoding.Symbol(name)] = &encoding.DescribedType{ - Descriptor: descriptor, - Value: value, - } - } -} - -// NewSelectorFilter creates a new selector filter (apache.org:selector-filter:string) with the specified filter value. -// Any preexisting selector filter will be updated with the new filter value. -func NewSelectorFilter(filter string) LinkFilter { - return NewLinkFilter(selectorFilter, selectorFilterCode, filter) -} - -const ( - selectorFilter = "apache.org:selector-filter:string" - selectorFilterCode = uint64(0x0000468C00000004) -) diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/message.go b/sdk/messaging/azeventhubs/internal/go-amqp/message.go deleted file mode 100644 index 20df597b6dc6..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/message.go +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "fmt" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" -) - -// Message is an AMQP message. 
-type Message struct { - // Message format code. - // - // The upper three octets of a message format code identify a particular message - // format. The lowest octet indicates the version of said message format. Any - // given version of a format is forwards compatible with all higher versions. - Format uint32 - - // The DeliveryTag can be up to 32 octets of binary data. - // Note that when mode one is enabled there will be no delivery tag. - DeliveryTag []byte - - // The header section carries standard delivery details about the transfer - // of a message through the AMQP network. - Header *MessageHeader - // If the header section is omitted the receiver MUST assume the appropriate - // default values (or the meaning implied by no value being set) for the - // fields within the header unless other target or node specific defaults - // have otherwise been set. - - // The delivery-annotations section is used for delivery-specific non-standard - // properties at the head of the message. Delivery annotations convey information - // from the sending peer to the receiving peer. - DeliveryAnnotations Annotations - // If the recipient does not understand the annotation it cannot be acted upon - // and its effects (such as any implied propagation) cannot be acted upon. - // Annotations might be specific to one implementation, or common to multiple - // implementations. The capabilities negotiated on link attach and on the source - // and target SHOULD be used to establish which annotations a peer supports. A - // registry of defined annotations and their meanings is maintained [AMQPDELANN]. - // The symbolic key "rejected" is reserved for the use of communicating error - // information regarding rejected messages. Any values associated with the - // "rejected" key MUST be of type error. - // - // If the delivery-annotations section is omitted, it is equivalent to a - // delivery-annotations section containing an empty map of annotations. 
- - // The message-annotations section is used for properties of the message which - // are aimed at the infrastructure. - Annotations Annotations - // The message-annotations section is used for properties of the message which - // are aimed at the infrastructure and SHOULD be propagated across every - // delivery step. Message annotations convey information about the message. - // Intermediaries MUST propagate the annotations unless the annotations are - // explicitly augmented or modified (e.g., by the use of the modified outcome). - // - // The capabilities negotiated on link attach and on the source and target can - // be used to establish which annotations a peer understands; however, in a - // network of AMQP intermediaries it might not be possible to know if every - // intermediary will understand the annotation. Note that for some annotations - // it might not be necessary for the intermediary to understand their purpose, - // i.e., they could be used purely as an attribute which can be filtered on. - // - // A registry of defined annotations and their meanings is maintained [AMQPMESSANN]. - // - // If the message-annotations section is omitted, it is equivalent to a - // message-annotations section containing an empty map of annotations. - - // The properties section is used for a defined set of standard properties of - // the message. - Properties *MessageProperties - // The properties section is part of the bare message; therefore, - // if retransmitted by an intermediary, it MUST remain unaltered. - - // The application-properties section is a part of the bare message used for - // structured application data. Intermediaries can use the data within this - // structure for the purposes of filtering or routing. 
- ApplicationProperties map[string]any - // The keys of this map are restricted to be of type string (which excludes - // the possibility of a null key) and the values are restricted to be of - // simple types only, that is, excluding map, list, and array types. - - // Data payloads. - // A data section contains opaque binary data. - Data [][]byte - - // Value payload. - // An amqp-value section contains a single AMQP value. - Value any - - // Sequence will contain AMQP sequence sections from the body of the message. - // An amqp-sequence section contains an AMQP sequence. - Sequence [][]any - - // The footer section is used for details about the message or delivery which - // can only be calculated or evaluated once the whole bare message has been - // constructed or seen (for example message hashes, HMACs, signatures and - // encryption details). - Footer Annotations - - deliveryID uint32 // used when sending disposition - settled bool // whether transfer was settled by sender -} - -// NewMessage returns a *Message with data as the payload. -// -// This constructor is intended as a helper for basic Messages with a -// single data payload. It is valid to construct a Message directly for -// more complex usages. -func NewMessage(data []byte) *Message { - return &Message{ - Data: [][]byte{data}, - } -} - -// GetData returns the first []byte from the Data field -// or nil if Data is empty. -func (m *Message) GetData() []byte { - if len(m.Data) < 1 { - return nil - } - return m.Data[0] -} - -// MarshalBinary encodes the message into binary form. 
-func (m *Message) MarshalBinary() ([]byte, error) { - buf := &buffer.Buffer{} - err := m.Marshal(buf) - return buf.Detach(), err -} - -func (m *Message) Marshal(wr *buffer.Buffer) error { - if m.Header != nil { - err := m.Header.Marshal(wr) - if err != nil { - return err - } - } - - if m.DeliveryAnnotations != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeDeliveryAnnotations) - err := encoding.Marshal(wr, m.DeliveryAnnotations) - if err != nil { - return err - } - } - - if m.Annotations != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeMessageAnnotations) - err := encoding.Marshal(wr, m.Annotations) - if err != nil { - return err - } - } - - if m.Properties != nil { - err := encoding.Marshal(wr, m.Properties) - if err != nil { - return err - } - } - - if m.ApplicationProperties != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeApplicationProperties) - err := encoding.Marshal(wr, m.ApplicationProperties) - if err != nil { - return err - } - } - - for _, data := range m.Data { - encoding.WriteDescriptor(wr, encoding.TypeCodeApplicationData) - err := encoding.WriteBinary(wr, data) - if err != nil { - return err - } - } - - if m.Value != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeAMQPValue) - err := encoding.Marshal(wr, m.Value) - if err != nil { - return err - } - } - - if m.Sequence != nil { - // the body can basically be one of three different types (value, data or sequence). - // When it's sequence it's actually _several_ sequence sections, one for each sub-array. - for _, v := range m.Sequence { - encoding.WriteDescriptor(wr, encoding.TypeCodeAMQPSequence) - err := encoding.Marshal(wr, v) - if err != nil { - return err - } - } - } - - if m.Footer != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeFooter) - err := encoding.Marshal(wr, m.Footer) - if err != nil { - return err - } - } - - return nil -} - -// UnmarshalBinary decodes the message from binary form. 
-func (m *Message) UnmarshalBinary(data []byte) error { - buf := buffer.New(data) - return m.Unmarshal(buf) -} - -func (m *Message) Unmarshal(r *buffer.Buffer) error { - // loop, decoding sections until bytes have been consumed - for r.Len() > 0 { - // determine type - type_, headerLength, err := encoding.PeekMessageType(r.Bytes()) - if err != nil { - return err - } - - var ( - section any - // section header is read from r before - // unmarshaling section is set to true - discardHeader = true - ) - switch encoding.AMQPType(type_) { - - case encoding.TypeCodeMessageHeader: - discardHeader = false - section = &m.Header - - case encoding.TypeCodeDeliveryAnnotations: - section = &m.DeliveryAnnotations - - case encoding.TypeCodeMessageAnnotations: - section = &m.Annotations - - case encoding.TypeCodeMessageProperties: - discardHeader = false - section = &m.Properties - - case encoding.TypeCodeApplicationProperties: - section = &m.ApplicationProperties - - case encoding.TypeCodeApplicationData: - r.Skip(int(headerLength)) - - var data []byte - err = encoding.Unmarshal(r, &data) - if err != nil { - return err - } - - m.Data = append(m.Data, data) - continue - - case encoding.TypeCodeAMQPSequence: - r.Skip(int(headerLength)) - - var data []any - err = encoding.Unmarshal(r, &data) - if err != nil { - return err - } - - m.Sequence = append(m.Sequence, data) - continue - - case encoding.TypeCodeFooter: - section = &m.Footer - - case encoding.TypeCodeAMQPValue: - section = &m.Value - - default: - return fmt.Errorf("unknown message section %#02x", type_) - } - - if discardHeader { - r.Skip(int(headerLength)) - } - - err = encoding.Unmarshal(r, section) - if err != nil { - return err - } - } - return nil -} - -/* - - - - - - - - -*/ - -// MessageHeader carries standard delivery details about the transfer -// of a message. 
-type MessageHeader struct { - Durable bool - Priority uint8 - TTL time.Duration // from milliseconds - FirstAcquirer bool - DeliveryCount uint32 -} - -func (h *MessageHeader) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeMessageHeader, []encoding.MarshalField{ - {Value: &h.Durable, Omit: !h.Durable}, - {Value: &h.Priority, Omit: h.Priority == 4}, - {Value: (*encoding.Milliseconds)(&h.TTL), Omit: h.TTL == 0}, - {Value: &h.FirstAcquirer, Omit: !h.FirstAcquirer}, - {Value: &h.DeliveryCount, Omit: h.DeliveryCount == 0}, - }) -} - -func (h *MessageHeader) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeMessageHeader, []encoding.UnmarshalField{ - {Field: &h.Durable}, - {Field: &h.Priority, HandleNull: func() error { h.Priority = 4; return nil }}, - {Field: (*encoding.Milliseconds)(&h.TTL)}, - {Field: &h.FirstAcquirer}, - {Field: &h.DeliveryCount}, - }...) -} - -/* - - - - - - - - - - - - - - - - -*/ - -// MessageProperties is the defined set of properties for AMQP messages. -type MessageProperties struct { - // Message-id, if set, uniquely identifies a message within the message system. - // The message producer is usually responsible for setting the message-id in - // such a way that it is assured to be globally unique. A broker MAY discard a - // message as a duplicate if the value of the message-id matches that of a - // previously received message sent to the same node. - // - // The value is restricted to the following types - // - uint64, UUID, []byte, or string - MessageID any - - // The identity of the user responsible for producing the message. - // The client sets this value, and it MAY be authenticated by intermediaries. - UserID []byte - - // The to field identifies the node that is the intended destination of the message. - // On any given transfer this might not be the node at the receiving end of the link. 
- To *string - - // A common field for summary information about the message content and purpose. - Subject *string - - // The address of the node to send replies to. - ReplyTo *string - - // This is a client-specific id that can be used to mark or identify messages - // between clients. - // - // The value is restricted to the following types - // - uint64, UUID, []byte, or string - CorrelationID any - - // The RFC-2046 [RFC2046] MIME type for the message's application-data section - // (body). As per RFC-2046 [RFC2046] this can contain a charset parameter defining - // the character encoding used: e.g., 'text/plain; charset="utf-8"'. - // - // For clarity, as per section 7.2.1 of RFC-2616 [RFC2616], where the content type - // is unknown the content-type SHOULD NOT be set. This allows the recipient the - // opportunity to determine the actual type. Where the section is known to be truly - // opaque binary data, the content-type SHOULD be set to application/octet-stream. - // - // When using an application-data section with a section code other than data, - // content-type SHOULD NOT be set. - ContentType *string - - // The content-encoding property is used as a modifier to the content-type. - // When present, its value indicates what additional content encodings have been - // applied to the application-data, and thus what decoding mechanisms need to be - // applied in order to obtain the media-type referenced by the content-type header - // field. - // - // Content-encoding is primarily used to allow a document to be compressed without - // losing the identity of its underlying content type. - // - // Content-encodings are to be interpreted as per section 3.5 of RFC 2616 [RFC2616]. - // Valid content-encodings are registered at IANA [IANAHTTPPARAMS]. - // - // The content-encoding MUST NOT be set when the application-data section is other - // than data. 
The binary representation of all other application-data section types - // is defined completely in terms of the AMQP type system. - // - // Implementations MUST NOT use the identity encoding. Instead, implementations - // SHOULD NOT set this property. Implementations SHOULD NOT use the compress encoding, - // except as to remain compatible with messages originally sent with other protocols, - // e.g. HTTP or SMTP. - // - // Implementations SHOULD NOT specify multiple content-encoding values except as to - // be compatible with messages originally sent with other protocols, e.g. HTTP or SMTP. - ContentEncoding *string - - // An absolute time when this message is considered to be expired. - AbsoluteExpiryTime *time.Time - - // An absolute time when this message was created. - CreationTime *time.Time - - // Identifies the group the message belongs to. - GroupID *string - - // The relative position of this message within its group. - // - // The value is defined as a RFC-1982 sequence number - GroupSequence *uint32 - - // This is a client-specific id that is used so that client can send replies to this - // message to a specific group. 
- ReplyToGroupID *string -} - -func (p *MessageProperties) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeMessageProperties, []encoding.MarshalField{ - {Value: p.MessageID, Omit: p.MessageID == nil}, - {Value: &p.UserID, Omit: len(p.UserID) == 0}, - {Value: p.To, Omit: p.To == nil}, - {Value: p.Subject, Omit: p.Subject == nil}, - {Value: p.ReplyTo, Omit: p.ReplyTo == nil}, - {Value: p.CorrelationID, Omit: p.CorrelationID == nil}, - {Value: (*encoding.Symbol)(p.ContentType), Omit: p.ContentType == nil}, - {Value: (*encoding.Symbol)(p.ContentEncoding), Omit: p.ContentEncoding == nil}, - {Value: p.AbsoluteExpiryTime, Omit: p.AbsoluteExpiryTime == nil}, - {Value: p.CreationTime, Omit: p.CreationTime == nil}, - {Value: p.GroupID, Omit: p.GroupID == nil}, - {Value: p.GroupSequence, Omit: p.GroupSequence == nil}, - {Value: p.ReplyToGroupID, Omit: p.ReplyToGroupID == nil}, - }) -} - -func (p *MessageProperties) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeMessageProperties, []encoding.UnmarshalField{ - {Field: &p.MessageID}, - {Field: &p.UserID}, - {Field: &p.To}, - {Field: &p.Subject}, - {Field: &p.ReplyTo}, - {Field: &p.CorrelationID}, - {Field: &p.ContentType}, - {Field: &p.ContentEncoding}, - {Field: &p.AbsoluteExpiryTime}, - {Field: &p.CreationTime}, - {Field: &p.GroupID}, - {Field: &p.GroupSequence}, - {Field: &p.ReplyToGroupID}, - }...) -} - -// Annotations keys must be of type string, int, or int64. -// -// String keys are encoded as AMQP Symbols. -type Annotations = encoding.Annotations - -// UUID is a 128 bit identifier as defined in RFC 4122. 
-type UUID = encoding.UUID diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/receiver.go b/sdk/messaging/azeventhubs/internal/go-amqp/receiver.go deleted file mode 100644 index 88da28bac3ca..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/receiver.go +++ /dev/null @@ -1,897 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "bytes" - "context" - "errors" - "fmt" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/queue" -) - -// Default link options -const ( - defaultLinkCredit = 1 -) - -// Receiver receives messages on a single AMQP link. 
-type Receiver struct { - l link - - // message receiving - receiverReady chan struct{} // receiver sends on this when mux is paused to indicate it can handle more messages - messagesQ *queue.Holder[Message] // used to send completed messages to receiver - txDisposition chan frameBodyEnvelope // used to funnel disposition frames through the mux - - unsettledMessages map[string]struct{} // used to keep track of messages being handled downstream - unsettledMessagesLock sync.RWMutex // lock to protect concurrent access to unsettledMessages - msgBuf buffer.Buffer // buffered bytes for current message - more bool // if true, buf contains a partial message - msg Message // current message being decoded - - settlementCount uint32 // the count of settled messages - settlementCountMu sync.Mutex // must be held when accessing settlementCount - - autoSendFlow bool // automatically send flow frames as credit becomes available - inFlight inFlight // used to track message disposition when rcv-settle-mode == second - creditor creditor // manages credits via calls to IssueCredit/DrainCredit -} - -// IssueCredit adds credits to be requested in the next flow request. -// Attempting to issue more credit than the receiver's max credit as -// specified in ReceiverOptions.MaxCredit will result in an error. -func (r *Receiver) IssueCredit(credit uint32) error { - if r.autoSendFlow { - return errors.New("issueCredit can only be used with receiver links using manual credit management") - } - - if err := r.creditor.IssueCredit(credit); err != nil { - return err - } - - // cause mux() to check our flow conditions. - select { - case r.receiverReady <- struct{}{}: - default: - } - - return nil -} - -// Prefetched returns the next message that is stored in the Receiver's -// prefetch cache. It does NOT wait for the remote sender to send messages -// and returns immediately if the prefetch cache is empty. 
To receive from the -// prefetch and wait for messages from the remote Sender use `Receive`. -// -// Once a message is received, and if the sender is configured in any mode other -// than SenderSettleModeSettled, you *must* take an action on the message by calling -// one of the following: AcceptMessage, RejectMessage, ReleaseMessage, ModifyMessage. -func (r *Receiver) Prefetched() *Message { - select { - case r.receiverReady <- struct{}{}: - default: - } - - // non-blocking receive to ensure buffered messages are - // delivered regardless of whether the link has been closed. - q := r.messagesQ.Acquire() - msg := q.Dequeue() - r.messagesQ.Release(q) - - if msg == nil { - return nil - } - - debug.Log(3, "RX (Receiver %p): prefetched delivery ID %d", r, msg.deliveryID) - - if msg.settled { - r.onSettlement(1) - } - - return msg -} - -// ReceiveOptions contains any optional values for the Receiver.Receive method. -type ReceiveOptions struct { - // for future expansion -} - -// Receive returns the next message from the sender. -// Blocks until a message is received, ctx completes, or an error occurs. -// -// Once a message is received, and if the sender is configured in any mode other -// than SenderSettleModeSettled, you *must* take an action on the message by calling -// one of the following: AcceptMessage, RejectMessage, ReleaseMessage, ModifyMessage. 
-func (r *Receiver) Receive(ctx context.Context, opts *ReceiveOptions) (*Message, error) { - if msg := r.Prefetched(); msg != nil { - return msg, nil - } - - // wait for the next message - select { - case q := <-r.messagesQ.Wait(): - msg := q.Dequeue() - debug.Assert(msg != nil) - debug.Log(3, "RX (Receiver %p): received delivery ID %d", r, msg.deliveryID) - r.messagesQ.Release(q) - if msg.settled { - r.onSettlement(1) - } - return msg, nil - case <-r.l.done: - // if the link receives messages and is then closed between the above call to r.Prefetched() - // and this select statement, the order of selecting r.messages and r.l.done is undefined. - // however, once r.l.done is closed the link cannot receive any more messages. so be sure to - // drain any that might have trickled in within this window. - if msg := r.Prefetched(); msg != nil { - return msg, nil - } - return nil, r.l.doneErr - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -// Accept notifies the server that the message has been accepted and does not require redelivery. -// - ctx controls waiting for the peer to acknowledge the disposition -// - msg is the message to accept -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message's disposition is in an unknown state. -func (r *Receiver) AcceptMessage(ctx context.Context, msg *Message) error { - return r.messageDisposition(ctx, msg, &encoding.StateAccepted{}) -} - -// Reject notifies the server that the message is invalid. -// - ctx controls waiting for the peer to acknowledge the disposition -// - msg is the message to reject -// - e is an optional rejection error -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message's disposition is in an unknown state. 
-func (r *Receiver) RejectMessage(ctx context.Context, msg *Message, e *Error) error { - return r.messageDisposition(ctx, msg, &encoding.StateRejected{Error: e}) -} - -// Release releases the message back to the server. The message may be redelivered to this or another consumer. -// - ctx controls waiting for the peer to acknowledge the disposition -// - msg is the message to release -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message's disposition is in an unknown state. -func (r *Receiver) ReleaseMessage(ctx context.Context, msg *Message) error { - return r.messageDisposition(ctx, msg, &encoding.StateReleased{}) -} - -// Modify notifies the server that the message was not acted upon and should be modifed. -// - ctx controls waiting for the peer to acknowledge the disposition -// - msg is the message to modify -// - options contains the optional settings to modify -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message's disposition is in an unknown state. -func (r *Receiver) ModifyMessage(ctx context.Context, msg *Message, options *ModifyMessageOptions) error { - if options == nil { - options = &ModifyMessageOptions{} - } - return r.messageDisposition(ctx, - msg, &encoding.StateModified{ - DeliveryFailed: options.DeliveryFailed, - UndeliverableHere: options.UndeliverableHere, - MessageAnnotations: options.Annotations, - }) -} - -// ModifyMessageOptions contains the optional parameters to ModifyMessage. -type ModifyMessageOptions struct { - // DeliveryFailed indicates that the server must consider this an - // unsuccessful delivery attempt and increment the delivery count. - DeliveryFailed bool - - // UndeliverableHere indicates that the server must not redeliver - // the message to this link. 
- UndeliverableHere bool - - // Annotations is an optional annotation map to be merged - // with the existing message annotations, overwriting existing keys - // if necessary. - Annotations Annotations -} - -// Address returns the link's address. -func (r *Receiver) Address() string { - if r.l.source == nil { - return "" - } - return r.l.source.Address -} - -// LinkName returns associated link name or an empty string if link is not defined. -func (r *Receiver) LinkName() string { - return r.l.key.name -} - -// LinkSourceFilterValue retrieves the specified link source filter value or nil if it doesn't exist. -func (r *Receiver) LinkSourceFilterValue(name string) any { - if r.l.source == nil { - return nil - } - filter, ok := r.l.source.Filter[encoding.Symbol(name)] - if !ok { - return nil - } - return filter.Value -} - -// Close closes the Receiver and AMQP link. -// - ctx controls waiting for the peer to acknowledge the close -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. However, the operation will continue to -// execute in the background. Subsequent calls will return a *LinkError -// that contains the context's error message. 
-func (r *Receiver) Close(ctx context.Context) error { - return r.l.closeLink(ctx) -} - -// sendDisposition sends a disposition frame to the peer -func (r *Receiver) sendDisposition(ctx context.Context, first uint32, last *uint32, state encoding.DeliveryState) error { - fr := &frames.PerformDisposition{ - Role: encoding.RoleReceiver, - First: first, - Last: last, - Settled: r.l.receiverSettleMode == nil || *r.l.receiverSettleMode == ReceiverSettleModeFirst, - State: state, - } - - sent := make(chan error, 1) - select { - case r.txDisposition <- frameBodyEnvelope{Ctx: ctx, FrameBody: fr, Sent: sent}: - debug.Log(2, "TX (Receiver %p): mux txDisposition %s", r, fr) - case <-r.l.done: - return r.l.doneErr - } - - select { - case err := <-sent: - return err - case <-r.l.done: - return r.l.doneErr - } -} - -func (r *Receiver) messageDisposition(ctx context.Context, msg *Message, state encoding.DeliveryState) error { - if msg.settled { - return nil - } - - // NOTE: we MUST add to the in-flight map before sending the disposition. if not, it's possible - // to receive the ack'ing disposition frame *before* the in-flight map has been updated which - // will cause the below <-wait to never trigger. - - var wait chan error - if r.l.receiverSettleMode != nil && *r.l.receiverSettleMode == ReceiverSettleModeSecond { - debug.Log(3, "TX (Receiver %p): delivery ID %d is in flight", r, msg.deliveryID) - wait = r.inFlight.add(msg) - } - - if err := r.sendDisposition(ctx, msg.deliveryID, nil, state); err != nil { - return err - } - - if wait == nil { - // mode first, there will be no settlement ack - r.deleteUnsettled(msg) - r.onSettlement(1) - return nil - } - - select { - case err := <-wait: - // err has three possibilities - // - nil, meaning the peer acknowledged the settlement - // - an *Error, meaning the peer rejected the message with a provided error - // - a non-AMQP error. this comes from calls to inFlight.clear() during mux unwind. 
- // only for the first two cases is the message considered settled - - if amqpErr := (&Error{}); err == nil || errors.As(err, &amqpErr) { - debug.Log(3, "RX (Receiver %p): delivery ID %d has been settled", r, msg.deliveryID) - // we've received confirmation of disposition - return err - } - - debug.Log(3, "RX (Receiver %p): error settling delivery ID %d: %v", r, msg.deliveryID, err) - return err - - case <-ctx.Done(): - // didn't receive the ack in the time allotted, leave message as unsettled - // TODO: if the ack arrives later, we need to remove the message from the unsettled map and reclaim the credit - return ctx.Err() - } -} - -// onSettlement is to be called after message settlement. -// - count is the number of messages that were settled -func (r *Receiver) onSettlement(count uint32) { - if !r.autoSendFlow { - return - } - - r.settlementCountMu.Lock() - r.settlementCount += count - r.settlementCountMu.Unlock() - - select { - case r.receiverReady <- struct{}{}: - // woke up - default: - // wake pending - } -} - -func (r *Receiver) addUnsettled(msg *Message) { - r.unsettledMessagesLock.Lock() - r.unsettledMessages[string(msg.DeliveryTag)] = struct{}{} - r.unsettledMessagesLock.Unlock() -} - -func (r *Receiver) deleteUnsettled(msg *Message) { - r.unsettledMessagesLock.Lock() - delete(r.unsettledMessages, string(msg.DeliveryTag)) - r.unsettledMessagesLock.Unlock() -} - -func (r *Receiver) countUnsettled() int { - r.unsettledMessagesLock.RLock() - count := len(r.unsettledMessages) - r.unsettledMessagesLock.RUnlock() - return count -} - -func newReceiver(source string, session *Session, opts *ReceiverOptions) (*Receiver, error) { - l := newLink(session, encoding.RoleReceiver) - l.source = &frames.Source{Address: source} - l.target = new(frames.Target) - l.linkCredit = defaultLinkCredit - r := &Receiver{ - l: l, - autoSendFlow: true, - receiverReady: make(chan struct{}, 1), - txDisposition: make(chan frameBodyEnvelope), - } - - r.messagesQ = 
queue.NewHolder(queue.New[Message](int(session.incomingWindow))) - - if opts == nil { - return r, nil - } - - for _, v := range opts.Capabilities { - r.l.target.Capabilities = append(r.l.target.Capabilities, encoding.Symbol(v)) - } - if opts.Credit > 0 { - r.l.linkCredit = uint32(opts.Credit) - } else if opts.Credit < 0 { - r.l.linkCredit = 0 - r.autoSendFlow = false - } - if opts.Durability > DurabilityUnsettledState { - return nil, fmt.Errorf("invalid Durability %d", opts.Durability) - } - r.l.target.Durable = opts.Durability - if opts.DynamicAddress { - r.l.source.Address = "" - r.l.dynamicAddr = opts.DynamicAddress - } - if opts.ExpiryPolicy != "" { - if err := encoding.ValidateExpiryPolicy(opts.ExpiryPolicy); err != nil { - return nil, err - } - r.l.target.ExpiryPolicy = opts.ExpiryPolicy - } - r.l.target.Timeout = opts.ExpiryTimeout - if opts.Filters != nil { - r.l.source.Filter = make(encoding.Filter) - for _, f := range opts.Filters { - f(r.l.source.Filter) - } - } - if opts.MaxMessageSize > 0 { - r.l.maxMessageSize = opts.MaxMessageSize - } - if opts.Name != "" { - r.l.key.name = opts.Name - } - if opts.Properties != nil { - r.l.properties = make(map[encoding.Symbol]any) - for k, v := range opts.Properties { - if k == "" { - return nil, errors.New("link property key must not be empty") - } - r.l.properties[encoding.Symbol(k)] = v - } - } - if opts.RequestedSenderSettleMode != nil { - if rsm := *opts.RequestedSenderSettleMode; rsm > SenderSettleModeMixed { - return nil, fmt.Errorf("invalid RequestedSenderSettleMode %d", rsm) - } - r.l.senderSettleMode = opts.RequestedSenderSettleMode - } - if opts.SettlementMode != nil { - if rsm := *opts.SettlementMode; rsm > ReceiverSettleModeSecond { - return nil, fmt.Errorf("invalid SettlementMode %d", rsm) - } - r.l.receiverSettleMode = opts.SettlementMode - } - r.l.target.Address = opts.TargetAddress - for _, v := range opts.SourceCapabilities { - r.l.source.Capabilities = append(r.l.source.Capabilities, 
encoding.Symbol(v)) - } - if opts.SourceDurability != DurabilityNone { - r.l.source.Durable = opts.SourceDurability - } - if opts.SourceExpiryPolicy != ExpiryPolicySessionEnd { - r.l.source.ExpiryPolicy = opts.SourceExpiryPolicy - } - if opts.SourceExpiryTimeout != 0 { - r.l.source.Timeout = opts.SourceExpiryTimeout - } - return r, nil -} - -// attach sends the Attach performative to establish the link with its parent session. -// this is automatically called by the new*Link constructors. -func (r *Receiver) attach(ctx context.Context) error { - if err := r.l.attach(ctx, func(pa *frames.PerformAttach) { - pa.Role = encoding.RoleReceiver - if pa.Source == nil { - pa.Source = new(frames.Source) - } - pa.Source.Dynamic = r.l.dynamicAddr - }, func(pa *frames.PerformAttach) { - if r.l.source == nil { - r.l.source = new(frames.Source) - } - // if dynamic address requested, copy assigned name to address - if r.l.dynamicAddr && pa.Source != nil { - r.l.source.Address = pa.Source.Address - } - // deliveryCount is a sequence number, must initialize to sender's initial sequence number - r.l.deliveryCount = pa.InitialDeliveryCount - r.unsettledMessages = map[string]struct{}{} - // copy the received filter values - if pa.Source != nil { - r.l.source.Filter = pa.Source.Filter - } - }); err != nil { - return err - } - - return nil -} - -func nop() {} - -type receiverTestHooks struct { - MuxStart func() - MuxSelect func() -} - -func (r *Receiver) mux(hooks receiverTestHooks) { - if hooks.MuxSelect == nil { - hooks.MuxSelect = nop - } - if hooks.MuxStart == nil { - hooks.MuxStart = nop - } - - defer func() { - // unblock any in flight message dispositions - r.inFlight.clear(r.l.doneErr) - - if !r.autoSendFlow { - // unblock any pending drain requests - r.creditor.EndDrain() - } - - close(r.l.done) - }() - - hooks.MuxStart() - - if r.autoSendFlow { - r.l.doneErr = r.muxFlow(r.l.linkCredit, false) - } - - for { - msgLen := r.messagesQ.Len() - - r.settlementCountMu.Lock() - // counter 
that accumulates the settled delivery count. - // once the threshold has been reached, the counter is - // reset and a flow frame is sent. - previousSettlementCount := r.settlementCount - if previousSettlementCount >= r.l.linkCredit { - r.settlementCount = 0 - } - r.settlementCountMu.Unlock() - - // once we have pending credit equal to or greater than our available credit, reclaim it. - // we do this instead of settlementCount > 0 to prevent flow frames from being too chatty. - // NOTE: we compare the settlementCount against the current link credit instead of some - // fixed threshold to ensure credit is reclaimed in cases where the number of unsettled - // messages remains high for whatever reason. - if r.autoSendFlow && previousSettlementCount > 0 && previousSettlementCount >= r.l.linkCredit { - debug.Log(1, "RX (Receiver %p) (auto): source: %q, inflight: %d, linkCredit: %d, deliveryCount: %d, messages: %d, unsettled: %d, settlementCount: %d, settleMode: %s", - r, r.l.source.Address, r.inFlight.len(), r.l.linkCredit, r.l.deliveryCount, msgLen, r.countUnsettled(), previousSettlementCount, r.l.receiverSettleMode.String()) - r.l.doneErr = r.creditor.IssueCredit(previousSettlementCount) - } else if r.l.linkCredit == 0 { - debug.Log(1, "RX (Receiver %p) (pause): source: %q, inflight: %d, linkCredit: %d, deliveryCount: %d, messages: %d, unsettled: %d, settlementCount: %d, settleMode: %s", - r, r.l.source.Address, r.inFlight.len(), r.l.linkCredit, r.l.deliveryCount, msgLen, r.countUnsettled(), previousSettlementCount, r.l.receiverSettleMode.String()) - } - - if r.l.doneErr != nil { - return - } - - drain, credits := r.creditor.FlowBits(r.l.linkCredit) - if drain || credits > 0 { - debug.Log(1, "RX (Receiver %p) (flow): source: %q, inflight: %d, curLinkCredit: %d, newLinkCredit: %d, drain: %v, deliveryCount: %d, messages: %d, unsettled: %d, settlementCount: %d, settleMode: %s", - r, r.l.source.Address, r.inFlight.len(), r.l.linkCredit, credits, drain, r.l.deliveryCount, 
msgLen, r.countUnsettled(), previousSettlementCount, r.l.receiverSettleMode.String()) - - // send a flow frame. - r.l.doneErr = r.muxFlow(credits, drain) - } - - if r.l.doneErr != nil { - return - } - - txDisposition := r.txDisposition - closed := r.l.close - if r.l.closeInProgress { - // swap out channel so it no longer triggers - closed = nil - - // disable sending of disposition frames once closing is in progress. - // this is to prevent races between mux shutdown and clearing of - // any in-flight dispositions. - txDisposition = nil - } - - hooks.MuxSelect() - - select { - case q := <-r.l.rxQ.Wait(): - // populated queue - fr := *q.Dequeue() - r.l.rxQ.Release(q) - - // if muxHandleFrame returns an error it means the mux must terminate. - // note that in the case of a client-side close due to an error, nil - // is returned in order to keep the mux running to ack the detach frame. - if err := r.muxHandleFrame(fr); err != nil { - r.l.doneErr = err - return - } - - case env := <-txDisposition: - r.l.txFrame(env.Ctx, env.FrameBody, env.Sent) - - case <-r.receiverReady: - continue - - case <-closed: - if r.l.closeInProgress { - // a client-side close due to protocol error is in progress - continue - } - - // receiver is being closed by the client - r.l.closeInProgress = true - fr := &frames.PerformDetach{ - Handle: r.l.handle, - Closed: true, - } - r.l.txFrame(context.Background(), fr, nil) - - case <-r.l.session.done: - r.l.doneErr = r.l.session.doneErr - return - } - } -} - -// muxFlow sends tr to the session mux. -// l.linkCredit will also be updated to `linkCredit` -func (r *Receiver) muxFlow(linkCredit uint32, drain bool) error { - var ( - deliveryCount = r.l.deliveryCount - ) - - fr := &frames.PerformFlow{ - Handle: &r.l.handle, - DeliveryCount: &deliveryCount, - LinkCredit: &linkCredit, // max number of messages, - Drain: drain, - } - - // Update credit. 
This must happen before entering loop below - // because incoming messages handled while waiting to transmit - // flow increment deliveryCount. This causes the credit to become - // out of sync with the server. - - if !drain { - // if we're draining we don't want to touch our internal credit - we're not changing it so any issued credits - // are still valid until drain completes, at which point they will be naturally zeroed. - r.l.linkCredit = linkCredit - } - - select { - case r.l.session.tx <- frameBodyEnvelope{Ctx: context.Background(), FrameBody: fr}: - debug.Log(2, "TX (Receiver %p): mux frame to Session (%p): %d, %s", r, r.l.session, r.l.session.channel, fr) - return nil - case <-r.l.close: - return nil - case <-r.l.session.done: - return r.l.session.doneErr - } -} - -// muxHandleFrame processes fr based on type. -func (r *Receiver) muxHandleFrame(fr frames.FrameBody) error { - debug.Log(2, "RX (Receiver %p): %s", r, fr) - switch fr := fr.(type) { - // message frame - case *frames.PerformTransfer: - r.muxReceive(*fr) - - // flow control frame - case *frames.PerformFlow: - if !fr.Echo { - // if the 'drain' flag has been set in the frame sent to the _receiver_ then - // we signal whomever is waiting (the service has seen and acknowledged our drain) - if fr.Drain && !r.autoSendFlow { - r.l.linkCredit = 0 // we have no active credits at this point. 
- r.creditor.EndDrain() - } - return nil - } - - var ( - // copy because sent by pointer below; prevent race - linkCredit = r.l.linkCredit - deliveryCount = r.l.deliveryCount - ) - - // send flow - resp := &frames.PerformFlow{ - Handle: &r.l.handle, - DeliveryCount: &deliveryCount, - LinkCredit: &linkCredit, // max number of messages - } - - select { - case r.l.session.tx <- frameBodyEnvelope{Ctx: context.Background(), FrameBody: resp}: - debug.Log(2, "TX (Receiver %p): mux frame to Session (%p): %d, %s", r, r.l.session, r.l.session.channel, resp) - case <-r.l.close: - return nil - case <-r.l.session.done: - return r.l.session.doneErr - } - - case *frames.PerformDisposition: - // Unblock receivers waiting for message disposition - // bubble disposition error up to the receiver - var dispositionError error - if state, ok := fr.State.(*encoding.StateRejected); ok { - // state.Error isn't required to be filled out. For instance if you dead letter a message - // you will get a rejected response that doesn't contain an error. 
- if state.Error != nil { - dispositionError = state.Error - } - } - // removal from the in-flight map will also remove the message from the unsettled map - count := r.inFlight.remove(fr.First, fr.Last, dispositionError, func(msg *Message) { - r.deleteUnsettled(msg) - msg.settled = true - }) - r.onSettlement(count) - - default: - return r.l.muxHandleFrame(fr) - } - - return nil -} - -func (r *Receiver) muxReceive(fr frames.PerformTransfer) { - if !r.more { - // this is the first transfer of a message, - // record the delivery ID, message format, - // and delivery Tag - if fr.DeliveryID != nil { - r.msg.deliveryID = *fr.DeliveryID - } - if fr.MessageFormat != nil { - r.msg.Format = *fr.MessageFormat - } - r.msg.DeliveryTag = fr.DeliveryTag - - // these fields are required on first transfer of a message - if fr.DeliveryID == nil { - r.l.closeWithError(ErrCondNotAllowed, "received message without a delivery-id") - return - } - if fr.MessageFormat == nil { - r.l.closeWithError(ErrCondNotAllowed, "received message without a message-format") - return - } - if fr.DeliveryTag == nil { - r.l.closeWithError(ErrCondNotAllowed, "received message without a delivery-tag") - return - } - } else { - // this is a continuation of a multipart message - // some fields may be omitted on continuation transfers, - // but if they are included they must be consistent - // with the first. 
- - if fr.DeliveryID != nil && *fr.DeliveryID != r.msg.deliveryID { - msg := fmt.Sprintf( - "received continuation transfer with inconsistent delivery-id: %d != %d", - *fr.DeliveryID, r.msg.deliveryID, - ) - r.l.closeWithError(ErrCondNotAllowed, msg) - return - } - if fr.MessageFormat != nil && *fr.MessageFormat != r.msg.Format { - msg := fmt.Sprintf( - "received continuation transfer with inconsistent message-format: %d != %d", - *fr.MessageFormat, r.msg.Format, - ) - r.l.closeWithError(ErrCondNotAllowed, msg) - return - } - if fr.DeliveryTag != nil && !bytes.Equal(fr.DeliveryTag, r.msg.DeliveryTag) { - msg := fmt.Sprintf( - "received continuation transfer with inconsistent delivery-tag: %q != %q", - fr.DeliveryTag, r.msg.DeliveryTag, - ) - r.l.closeWithError(ErrCondNotAllowed, msg) - return - } - } - - // discard message if it's been aborted - if fr.Aborted { - r.msgBuf.Reset() - r.msg = Message{} - r.more = false - return - } - - // ensure maxMessageSize will not be exceeded - if r.l.maxMessageSize != 0 && uint64(r.msgBuf.Len())+uint64(len(fr.Payload)) > r.l.maxMessageSize { - r.l.closeWithError(ErrCondMessageSizeExceeded, fmt.Sprintf("received message larger than max size of %d", r.l.maxMessageSize)) - return - } - - // add the payload the the buffer - r.msgBuf.Append(fr.Payload) - - // mark as settled if at least one frame is settled - r.msg.settled = r.msg.settled || fr.Settled - - // save in-progress status - r.more = fr.More - - if fr.More { - return - } - - // last frame in message - err := r.msg.Unmarshal(&r.msgBuf) - if err != nil { - r.l.closeWithError(ErrCondInternalError, err.Error()) - return - } - - // send to receiver - if !r.msg.settled { - r.addUnsettled(&r.msg) - debug.Log(3, "RX (Receiver %p): add unsettled delivery ID %d", r, r.msg.deliveryID) - } - - q := r.messagesQ.Acquire() - q.Enqueue(r.msg) - msgLen := q.Len() - r.messagesQ.Release(q) - - // reset progress - r.msgBuf.Reset() - r.msg = Message{} - - // decrement link-credit after entire 
message received - r.l.deliveryCount++ - r.l.linkCredit-- - debug.Log(3, "RX (Receiver %p) link %s - deliveryCount: %d, linkCredit: %d, len(messages): %d", r, r.l.key.name, r.l.deliveryCount, r.l.linkCredit, msgLen) -} - -// inFlight tracks in-flight message dispositions allowing receivers -// to block waiting for the server to respond when an appropriate -// settlement mode is configured. -type inFlight struct { - mu sync.RWMutex - m map[uint32]inFlightInfo -} - -type inFlightInfo struct { - wait chan error - msg *Message -} - -func (f *inFlight) add(msg *Message) chan error { - wait := make(chan error, 1) - - f.mu.Lock() - if f.m == nil { - f.m = make(map[uint32]inFlightInfo) - } - - f.m[msg.deliveryID] = inFlightInfo{wait: wait, msg: msg} - f.mu.Unlock() - - return wait -} - -func (f *inFlight) remove(first uint32, last *uint32, err error, handler func(*Message)) uint32 { - f.mu.Lock() - - if f.m == nil { - f.mu.Unlock() - return 0 - } - - ll := first - if last != nil { - ll = *last - } - - count := uint32(0) - for i := first; i <= ll; i++ { - info, ok := f.m[i] - if ok { - handler(info.msg) - info.wait <- err - delete(f.m, i) - count++ - } - } - - f.mu.Unlock() - return count -} - -func (f *inFlight) clear(err error) { - f.mu.Lock() - for id, info := range f.m { - info.wait <- err - delete(f.m, id) - } - f.mu.Unlock() -} - -func (f *inFlight) len() int { - f.mu.RLock() - defer f.mu.RUnlock() - return len(f.m) -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/sasl.go b/sdk/messaging/azeventhubs/internal/go-amqp/sasl.go deleted file mode 100644 index 11d185140dbf..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/sasl.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "fmt" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug" - 
"github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames" -) - -// SASL Mechanisms -const ( - saslMechanismPLAIN encoding.Symbol = "PLAIN" - saslMechanismANONYMOUS encoding.Symbol = "ANONYMOUS" - saslMechanismEXTERNAL encoding.Symbol = "EXTERNAL" - saslMechanismXOAUTH2 encoding.Symbol = "XOAUTH2" -) - -// SASLType represents a SASL configuration to use during authentication. -type SASLType func(c *Conn) error - -// ConnSASLPlain enables SASL PLAIN authentication for the connection. -// -// SASL PLAIN transmits credentials in plain text and should only be used -// on TLS/SSL enabled connection. -func SASLTypePlain(username, password string) SASLType { - // TODO: how widely used is hostname? should it be supported - return func(c *Conn) error { - // make handlers map if no other mechanism has - if c.saslHandlers == nil { - c.saslHandlers = make(map[encoding.Symbol]stateFunc) - } - - // add the handler the the map - c.saslHandlers[saslMechanismPLAIN] = func(ctx context.Context) (stateFunc, error) { - // send saslInit with PLAIN payload - init := &frames.SASLInit{ - Mechanism: "PLAIN", - InitialResponse: []byte("\x00" + username + "\x00" + password), - Hostname: "", - } - fr := frames.Frame{ - Type: frames.TypeSASL, - Body: init, - } - debug.Log(1, "TX (ConnSASLPlain %p): %s", c, fr) - timeout, err := c.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - if err = c.writeFrame(timeout, fr); err != nil { - return nil, err - } - - // go to c.saslOutcome to handle the server response - return c.saslOutcome, nil - } - return nil - } -} - -// ConnSASLAnonymous enables SASL ANONYMOUS authentication for the connection. 
-func SASLTypeAnonymous() SASLType { - return func(c *Conn) error { - // make handlers map if no other mechanism has - if c.saslHandlers == nil { - c.saslHandlers = make(map[encoding.Symbol]stateFunc) - } - - // add the handler the the map - c.saslHandlers[saslMechanismANONYMOUS] = func(ctx context.Context) (stateFunc, error) { - init := &frames.SASLInit{ - Mechanism: saslMechanismANONYMOUS, - InitialResponse: []byte("anonymous"), - } - fr := frames.Frame{ - Type: frames.TypeSASL, - Body: init, - } - debug.Log(1, "TX (ConnSASLAnonymous %p): %s", c, fr) - timeout, err := c.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - if err = c.writeFrame(timeout, fr); err != nil { - return nil, err - } - - // go to c.saslOutcome to handle the server response - return c.saslOutcome, nil - } - return nil - } -} - -// ConnSASLExternal enables SASL EXTERNAL authentication for the connection. -// The value for resp is dependent on the type of authentication (empty string is common for TLS). -// See https://datatracker.ietf.org/doc/html/rfc4422#appendix-A for additional info. -func SASLTypeExternal(resp string) SASLType { - return func(c *Conn) error { - // make handlers map if no other mechanism has - if c.saslHandlers == nil { - c.saslHandlers = make(map[encoding.Symbol]stateFunc) - } - - // add the handler the the map - c.saslHandlers[saslMechanismEXTERNAL] = func(ctx context.Context) (stateFunc, error) { - init := &frames.SASLInit{ - Mechanism: saslMechanismEXTERNAL, - InitialResponse: []byte(resp), - } - fr := frames.Frame{ - Type: frames.TypeSASL, - Body: init, - } - debug.Log(1, "TX (ConnSASLExternal %p): %s", c, fr) - timeout, err := c.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - if err = c.writeFrame(timeout, fr); err != nil { - return nil, err - } - - // go to c.saslOutcome to handle the server response - return c.saslOutcome, nil - } - return nil - } -} - -// ConnSASLXOAUTH2 enables SASL XOAUTH2 authentication for the connection. 
-// -// The saslMaxFrameSizeOverride parameter allows the limit that governs the maximum frame size this client will allow -// itself to generate to be raised for the sasl-init frame only. Set this when the size of the size of the SASL XOAUTH2 -// initial client response (which contains the username and bearer token) would otherwise breach the 512 byte min-max-frame-size -// (http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-transport-v1.0-os.html#definition-MIN-MAX-FRAME-SIZE). Pass -1 -// to keep the default. -// -// SASL XOAUTH2 transmits the bearer in plain text and should only be used -// on TLS/SSL enabled connection. -func SASLTypeXOAUTH2(username, bearer string, saslMaxFrameSizeOverride uint32) SASLType { - return func(c *Conn) error { - // make handlers map if no other mechanism has - if c.saslHandlers == nil { - c.saslHandlers = make(map[encoding.Symbol]stateFunc) - } - - response, err := saslXOAUTH2InitialResponse(username, bearer) - if err != nil { - return err - } - - handler := saslXOAUTH2Handler{ - conn: c, - maxFrameSizeOverride: saslMaxFrameSizeOverride, - response: response, - } - // add the handler the the map - c.saslHandlers[saslMechanismXOAUTH2] = handler.init - return nil - } -} - -type saslXOAUTH2Handler struct { - conn *Conn - maxFrameSizeOverride uint32 - response []byte - errorResponse []byte // https://developers.google.com/gmail/imap/xoauth2-protocol#error_response -} - -func (s saslXOAUTH2Handler) init(ctx context.Context) (stateFunc, error) { - originalPeerMaxFrameSize := s.conn.peerMaxFrameSize - if s.maxFrameSizeOverride > s.conn.peerMaxFrameSize { - s.conn.peerMaxFrameSize = s.maxFrameSizeOverride - } - timeout, err := s.conn.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - err = s.conn.writeFrame(timeout, frames.Frame{ - Type: frames.TypeSASL, - Body: &frames.SASLInit{ - Mechanism: saslMechanismXOAUTH2, - InitialResponse: s.response, - }, - }) - s.conn.peerMaxFrameSize = originalPeerMaxFrameSize - if err != nil 
{ - return nil, err - } - - return s.step, nil -} - -func (s saslXOAUTH2Handler) step(ctx context.Context) (stateFunc, error) { - // read challenge or outcome frame - fr, err := s.conn.readFrame() - if err != nil { - return nil, err - } - - switch v := fr.Body.(type) { - case *frames.SASLOutcome: - // check if auth succeeded - if v.Code != encoding.CodeSASLOK { - return nil, fmt.Errorf("SASL XOAUTH2 auth failed with code %#00x: %s : %s", - v.Code, v.AdditionalData, s.errorResponse) - } - - // return to c.negotiateProto - s.conn.saslComplete = true - return s.conn.negotiateProto, nil - case *frames.SASLChallenge: - if s.errorResponse == nil { - s.errorResponse = v.Challenge - - timeout, err := s.conn.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - - // The SASL protocol requires clients to send an empty response to this challenge. - err = s.conn.writeFrame(timeout, frames.Frame{ - Type: frames.TypeSASL, - Body: &frames.SASLResponse{ - Response: []byte{}, - }, - }) - if err != nil { - return nil, err - } - return s.step, nil - } else { - return nil, fmt.Errorf("SASL XOAUTH2 unexpected additional error response received during "+ - "exchange. 
Initial error response: %s, additional response: %s", s.errorResponse, v.Challenge) - } - default: - return nil, fmt.Errorf("sasl: unexpected frame type %T", fr.Body) - } -} - -func saslXOAUTH2InitialResponse(username string, bearer string) ([]byte, error) { - if len(bearer) == 0 { - return []byte{}, fmt.Errorf("unacceptable bearer token") - } - for _, char := range bearer { - if char < '\x20' || char > '\x7E' { - return []byte{}, fmt.Errorf("unacceptable bearer token") - } - } - for _, char := range username { - if char == '\x01' { - return []byte{}, fmt.Errorf("unacceptable username") - } - } - return []byte("user=" + username + "\x01auth=Bearer " + bearer + "\x01\x01"), nil -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/sender.go b/sdk/messaging/azeventhubs/internal/go-amqp/sender.go deleted file mode 100644 index bb130ba17b2d..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/sender.go +++ /dev/null @@ -1,476 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames" -) - -// Sender sends messages on a single AMQP link. -type Sender struct { - l link - transfers chan transferEnvelope // sender uses to send transfer frames - - mu sync.Mutex // protects buf and nextDeliveryTag - buf buffer.Buffer - nextDeliveryTag uint64 -} - -// LinkName() is the name of the link used for this Sender. 
-func (s *Sender) LinkName() string { - return s.l.key.name -} - -// MaxMessageSize is the maximum size of a single message. -func (s *Sender) MaxMessageSize() uint64 { - return s.l.maxMessageSize -} - -// SendOptions contains any optional values for the Sender.Send method. -type SendOptions struct { - // Indicates the message is to be sent as settled when settlement mode is SenderSettleModeMixed. - // If the settlement mode is SenderSettleModeUnsettled and Settled is true, an error is returned. - Settled bool -} - -// Send sends a Message. -// -// Blocks until the message is sent or an error occurs. If the peer is -// configured for receiver settlement mode second, the call also blocks -// until the peer confirms message settlement. -// -// - ctx controls waiting for the message to be sent and possibly confirmed -// - msg is the message to send -// - opts contains optional values, pass nil to accept the defaults -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message is in an unknown state of transmission. -// -// Send is safe for concurrent use. Since only a single message can be -// sent on a link at a time, this is most useful when settlement confirmation -// has been requested (receiver settle mode is second). In this case, -// additional messages can be sent while the current goroutine is waiting -// for the confirmation. -func (s *Sender) Send(ctx context.Context, msg *Message, opts *SendOptions) error { - // check if the link is dead. while it's safe to call s.send - // in this case, this will avoid some allocations etc. 
- select { - case <-s.l.done: - return s.l.doneErr - default: - // link is still active - } - done, err := s.send(ctx, msg, opts) - if err != nil { - return err - } - - // wait for transfer to be confirmed - select { - case state := <-done: - if state, ok := state.(*encoding.StateRejected); ok { - if state.Error != nil { - return state.Error - } - return errors.New("the peer rejected the message without specifying an error") - } - return nil - case <-s.l.done: - return s.l.doneErr - case <-ctx.Done(): - // TODO: if the message is not settled and we never received a disposition, how can we consider the message as sent? - return ctx.Err() - } -} - -// send is separated from Send so that the mutex unlock can be deferred without -// locking the transfer confirmation that happens in Send. -func (s *Sender) send(ctx context.Context, msg *Message, opts *SendOptions) (chan encoding.DeliveryState, error) { - const ( - maxDeliveryTagLength = 32 - maxTransferFrameHeader = 66 // determined by calcMaxTransferFrameHeader - ) - if len(msg.DeliveryTag) > maxDeliveryTagLength { - return nil, fmt.Errorf("delivery tag is over the allowed %v bytes, len: %v", maxDeliveryTagLength, len(msg.DeliveryTag)) - } - - s.mu.Lock() - defer s.mu.Unlock() - - s.buf.Reset() - err := msg.Marshal(&s.buf) - if err != nil { - return nil, err - } - - if s.l.maxMessageSize != 0 && uint64(s.buf.Len()) > s.l.maxMessageSize { - return nil, fmt.Errorf("encoded message size exceeds max of %d", s.l.maxMessageSize) - } - - senderSettled := senderSettleModeValue(s.l.senderSettleMode) == SenderSettleModeSettled - if opts != nil { - if opts.Settled && senderSettleModeValue(s.l.senderSettleMode) == SenderSettleModeUnsettled { - return nil, errors.New("can't send message as settled when sender settlement mode is unsettled") - } else if opts.Settled { - senderSettled = true - } - } - - var ( - maxPayloadSize = int64(s.l.session.conn.peerMaxFrameSize) - maxTransferFrameHeader - ) - - deliveryTag := msg.DeliveryTag - 
if len(deliveryTag) == 0 { - // use uint64 encoded as []byte as deliveryTag - deliveryTag = make([]byte, 8) - binary.BigEndian.PutUint64(deliveryTag, s.nextDeliveryTag) - s.nextDeliveryTag++ - } - - fr := frames.PerformTransfer{ - Handle: s.l.handle, - DeliveryID: &needsDeliveryID, - DeliveryTag: deliveryTag, - MessageFormat: &msg.Format, - More: s.buf.Len() > 0, - } - - for fr.More { - buf, _ := s.buf.Next(maxPayloadSize) - fr.Payload = append([]byte(nil), buf...) - fr.More = s.buf.Len() > 0 - if !fr.More { - // SSM=settled: overrides RSM; no acks. - // SSM=unsettled: sender should wait for receiver to ack - // RSM=first: receiver considers it settled immediately, but must still send ack (SSM=unsettled only) - // RSM=second: receiver sends ack and waits for return ack from sender (SSM=unsettled only) - - // mark final transfer as settled when sender mode is settled - fr.Settled = senderSettled - - // set done on last frame - fr.Done = make(chan encoding.DeliveryState, 1) - } - - // NOTE: we MUST send a copy of fr here since we modify it post send - - sent := make(chan error, 1) - select { - case s.transfers <- transferEnvelope{Ctx: ctx, Frame: fr, Sent: sent}: - // frame was sent to our mux - case <-s.l.done: - return nil, s.l.doneErr - case <-ctx.Done(): - return nil, &Error{Condition: ErrCondTransferLimitExceeded, Description: fmt.Sprintf("credit limit exceeded for sending link %s", s.l.key.name)} - } - - select { - case err := <-sent: - if err != nil { - return nil, err - } - case <-s.l.done: - return nil, s.l.doneErr - } - - // clear values that are only required on first message - fr.DeliveryID = nil - fr.DeliveryTag = nil - fr.MessageFormat = nil - } - - return fr.Done, nil -} - -// Address returns the link's address. -func (s *Sender) Address() string { - if s.l.target == nil { - return "" - } - return s.l.target.Address -} - -// Close closes the Sender and AMQP link. 
-// - ctx controls waiting for the peer to acknowledge the close -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. However, the operation will continue to -// execute in the background. Subsequent calls will return a *LinkError -// that contains the context's error message. -func (s *Sender) Close(ctx context.Context) error { - return s.l.closeLink(ctx) -} - -// newSendingLink creates a new sending link and attaches it to the session -func newSender(target string, session *Session, opts *SenderOptions) (*Sender, error) { - l := newLink(session, encoding.RoleSender) - l.target = &frames.Target{Address: target} - l.source = new(frames.Source) - s := &Sender{ - l: l, - } - - if opts == nil { - return s, nil - } - - for _, v := range opts.Capabilities { - s.l.source.Capabilities = append(s.l.source.Capabilities, encoding.Symbol(v)) - } - if opts.Durability > DurabilityUnsettledState { - return nil, fmt.Errorf("invalid Durability %d", opts.Durability) - } - s.l.source.Durable = opts.Durability - if opts.DynamicAddress { - s.l.target.Address = "" - s.l.dynamicAddr = opts.DynamicAddress - } - if opts.ExpiryPolicy != "" { - if err := encoding.ValidateExpiryPolicy(opts.ExpiryPolicy); err != nil { - return nil, err - } - s.l.source.ExpiryPolicy = opts.ExpiryPolicy - } - s.l.source.Timeout = opts.ExpiryTimeout - if opts.Name != "" { - s.l.key.name = opts.Name - } - if opts.Properties != nil { - s.l.properties = make(map[encoding.Symbol]any) - for k, v := range opts.Properties { - if k == "" { - return nil, errors.New("link property key must not be empty") - } - s.l.properties[encoding.Symbol(k)] = v - } - } - if opts.RequestedReceiverSettleMode != nil { - if rsm := *opts.RequestedReceiverSettleMode; rsm > ReceiverSettleModeSecond { - return nil, fmt.Errorf("invalid RequestedReceiverSettleMode %d", rsm) - } - s.l.receiverSettleMode = opts.RequestedReceiverSettleMode - } - if opts.SettlementMode != nil { - if ssm 
:= *opts.SettlementMode; ssm > SenderSettleModeMixed { - return nil, fmt.Errorf("invalid SettlementMode %d", ssm) - } - s.l.senderSettleMode = opts.SettlementMode - } - s.l.source.Address = opts.SourceAddress - for _, v := range opts.TargetCapabilities { - s.l.target.Capabilities = append(s.l.target.Capabilities, encoding.Symbol(v)) - } - if opts.TargetDurability != DurabilityNone { - s.l.target.Durable = opts.TargetDurability - } - if opts.TargetExpiryPolicy != ExpiryPolicySessionEnd { - s.l.target.ExpiryPolicy = opts.TargetExpiryPolicy - } - if opts.TargetExpiryTimeout != 0 { - s.l.target.Timeout = opts.TargetExpiryTimeout - } - return s, nil -} - -func (s *Sender) attach(ctx context.Context) error { - if err := s.l.attach(ctx, func(pa *frames.PerformAttach) { - pa.Role = encoding.RoleSender - if pa.Target == nil { - pa.Target = new(frames.Target) - } - pa.Target.Dynamic = s.l.dynamicAddr - }, func(pa *frames.PerformAttach) { - if s.l.target == nil { - s.l.target = new(frames.Target) - } - - // if dynamic address requested, copy assigned name to address - if s.l.dynamicAddr && pa.Target != nil { - s.l.target.Address = pa.Target.Address - } - }); err != nil { - return err - } - - s.transfers = make(chan transferEnvelope) - - return nil -} - -type senderTestHooks struct { - MuxTransfer func() -} - -func (s *Sender) mux(hooks senderTestHooks) { - if hooks.MuxTransfer == nil { - hooks.MuxTransfer = nop - } - - defer func() { - close(s.l.done) - }() - -Loop: - for { - var outgoingTransfers chan transferEnvelope - if s.l.linkCredit > 0 { - debug.Log(1, "TX (Sender %p) (enable): target: %q, link credit: %d, deliveryCount: %d", s, s.l.target.Address, s.l.linkCredit, s.l.deliveryCount) - outgoingTransfers = s.transfers - } else { - debug.Log(1, "TX (Sender %p) (pause): target: %q, link credit: %d, deliveryCount: %d", s, s.l.target.Address, s.l.linkCredit, s.l.deliveryCount) - } - - closed := s.l.close - if s.l.closeInProgress { - // swap out channel so it no longer 
triggers - closed = nil - - // disable sending once closing is in progress. - // this prevents races with mux shutdown and - // the peer sending disposition frames. - outgoingTransfers = nil - } - - select { - // received frame - case q := <-s.l.rxQ.Wait(): - // populated queue - fr := *q.Dequeue() - s.l.rxQ.Release(q) - - // if muxHandleFrame returns an error it means the mux must terminate. - // note that in the case of a client-side close due to an error, nil - // is returned in order to keep the mux running to ack the detach frame. - if err := s.muxHandleFrame(fr); err != nil { - s.l.doneErr = err - return - } - - // send data - case env := <-outgoingTransfers: - hooks.MuxTransfer() - select { - case s.l.session.txTransfer <- env: - debug.Log(2, "TX (Sender %p): mux transfer to Session: %d, %s", s, s.l.session.channel, env.Frame) - // decrement link-credit after entire message transferred - if !env.Frame.More { - s.l.deliveryCount++ - s.l.linkCredit-- - // we are the sender and we keep track of the peer's link credit - debug.Log(3, "TX (Sender %p): link: %s, link credit: %d", s, s.l.key.name, s.l.linkCredit) - } - continue Loop - case <-s.l.close: - continue Loop - case <-s.l.session.done: - continue Loop - } - - case <-closed: - if s.l.closeInProgress { - // a client-side close due to protocol error is in progress - continue - } - - // sender is being closed by the client - s.l.closeInProgress = true - fr := &frames.PerformDetach{ - Handle: s.l.handle, - Closed: true, - } - s.l.txFrame(context.Background(), fr, nil) - - case <-s.l.session.done: - s.l.doneErr = s.l.session.doneErr - return - } - } -} - -// muxHandleFrame processes fr based on type. 
-// depending on the peer's RSM, it might return a disposition frame for sending -func (s *Sender) muxHandleFrame(fr frames.FrameBody) error { - debug.Log(2, "RX (Sender %p): %s", s, fr) - switch fr := fr.(type) { - // flow control frame - case *frames.PerformFlow: - // the sender's link-credit variable MUST be set according to this formula when flow information is given by the receiver: - // link-credit(snd) := delivery-count(rcv) + link-credit(rcv) - delivery-count(snd) - linkCredit := *fr.LinkCredit - s.l.deliveryCount - if fr.DeliveryCount != nil { - // DeliveryCount can be nil if the receiver hasn't processed - // the attach. That shouldn't be the case here, but it's - // what ActiveMQ does. - linkCredit += *fr.DeliveryCount - } - - s.l.linkCredit = linkCredit - - if !fr.Echo { - return nil - } - - var ( - // copy because sent by pointer below; prevent race - deliveryCount = s.l.deliveryCount - ) - - // send flow - resp := &frames.PerformFlow{ - Handle: &s.l.handle, - DeliveryCount: &deliveryCount, - LinkCredit: &linkCredit, // max number of messages - } - - select { - case s.l.session.tx <- frameBodyEnvelope{Ctx: context.Background(), FrameBody: resp}: - debug.Log(2, "TX (Sender %p): mux frame to Session (%p): %d, %s", s, s.l.session, s.l.session.channel, resp) - case <-s.l.close: - return nil - case <-s.l.session.done: - return s.l.session.doneErr - } - - case *frames.PerformDisposition: - if fr.Settled { - return nil - } - - // peer is in mode second, so we must send confirmation of disposition. - // NOTE: the ack must be sent through the session so it can close out - // the in-flight disposition. 
- dr := &frames.PerformDisposition{ - Role: encoding.RoleSender, - First: fr.First, - Last: fr.Last, - Settled: true, - } - - select { - case s.l.session.tx <- frameBodyEnvelope{Ctx: context.Background(), FrameBody: dr}: - debug.Log(2, "TX (Sender %p): mux frame to Session (%p): %d, %s", s, s.l.session, s.l.session.channel, dr) - case <-s.l.close: - return nil - case <-s.l.session.done: - return s.l.session.doneErr - } - - return nil - - default: - return s.l.muxHandleFrame(fr) - } - - return nil -} diff --git a/sdk/messaging/azeventhubs/internal/go-amqp/session.go b/sdk/messaging/azeventhubs/internal/go-amqp/session.go deleted file mode 100644 index 7a6cfd625441..000000000000 --- a/sdk/messaging/azeventhubs/internal/go-amqp/session.go +++ /dev/null @@ -1,792 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "errors" - "fmt" - "math" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/bitmap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/frames" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp/internal/queue" -) - -// Default session options -const ( - defaultWindow = 5000 -) - -// SessionOptions contains the optional settings for configuring an AMQP session. -type SessionOptions struct { - // MaxLinks sets the maximum number of links (Senders/Receivers) - // allowed on the session. - // - // Minimum: 1. - // Default: 4294967295. - MaxLinks uint32 -} - -// Session is an AMQP session. -// -// A session multiplexes Receivers. 
-type Session struct { - channel uint16 // session's local channel - remoteChannel uint16 // session's remote channel, owned by conn.connReader - conn *Conn // underlying conn - tx chan frameBodyEnvelope // non-transfer frames to be sent; session must track disposition - txTransfer chan transferEnvelope // transfer frames to be sent; session must track disposition - - // frames destined for this session are added to this queue by conn.connReader - rxQ *queue.Holder[frames.FrameBody] - - // flow control - incomingWindow uint32 - outgoingWindow uint32 - needFlowCount uint32 - - handleMax uint32 - - // link management - linksMu sync.RWMutex // used to synchronize link handle allocation - linksByKey map[linkKey]*link // mapping of name+role link - handles *bitmap.Bitmap // allocated handles - - abandonedLinksMu sync.Mutex - abandonedLinks []*link - - // used for gracefully closing session - close chan struct{} // closed by calling Close(). it signals that the end performative should be sent - closeOnce sync.Once - - // part of internal public surface area - done chan struct{} // closed when the session has terminated (mux exited); DO NOT wait on this from within Session.mux() as it will never trigger! - endSent chan struct{} // closed when the end performative has been sent; once this is closed, links MUST NOT send any frames! - doneErr error // contains the mux error state; ONLY written to by the mux and MUST only be read from after done is closed! - closeErr error // contains the error state returned from Close(); ONLY Close() reads/writes this! 
-} - -func newSession(c *Conn, channel uint16, opts *SessionOptions) *Session { - s := &Session{ - conn: c, - channel: channel, - tx: make(chan frameBodyEnvelope), - txTransfer: make(chan transferEnvelope), - incomingWindow: defaultWindow, - outgoingWindow: defaultWindow, - handleMax: math.MaxUint32 - 1, - linksMu: sync.RWMutex{}, - linksByKey: make(map[linkKey]*link), - close: make(chan struct{}), - done: make(chan struct{}), - endSent: make(chan struct{}), - } - - if opts != nil { - if opts.MaxLinks != 0 { - // MaxLinks is the number of total links. - // handleMax is the max handle ID which starts - // at zero. so we decrement by one - s.handleMax = opts.MaxLinks - 1 - } - } - - // create handle map after options have been applied - s.handles = bitmap.New(s.handleMax) - - s.rxQ = queue.NewHolder(queue.New[frames.FrameBody](int(s.incomingWindow))) - - return s -} - -// waitForFrame waits for an incoming frame to be queued. -// it returns the next frame from the queue, or an error. -// the error is either from the context or conn.doneErr. -// not meant for consumption outside of session.go. -func (s *Session) waitForFrame(ctx context.Context) (frames.FrameBody, error) { - var q *queue.Queue[frames.FrameBody] - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-s.conn.done: - return nil, s.conn.doneErr - case q = <-s.rxQ.Wait(): - // populated queue - } - - fr := q.Dequeue() - s.rxQ.Release(q) - - return *fr, nil -} - -func (s *Session) begin(ctx context.Context) error { - // send Begin to server - begin := &frames.PerformBegin{ - NextOutgoingID: 0, - IncomingWindow: s.incomingWindow, - OutgoingWindow: s.outgoingWindow, - HandleMax: s.handleMax, - } - - if err := s.txFrameAndWait(ctx, begin); err != nil { - return err - } - - // wait for response - fr, err := s.waitForFrame(ctx) - if err != nil { - // if we exit before receiving the ack, our caller will clean up the channel. 
- // however, it does mean that the peer will now have assigned an outgoing - // channel ID that's not in use. - return err - } - - begin, ok := fr.(*frames.PerformBegin) - if !ok { - // this codepath is hard to hit (impossible?). if the response isn't a PerformBegin and we've not - // yet seen the remote channel number, the default clause in conn.connReader will protect us from that. - // if we have seen the remote channel number then it's likely the session.mux for that channel will - // either swallow the frame or blow up in some other way, both causing this call to hang. - // deallocate session on error. we can't call - // s.Close() as the session mux hasn't started yet. - debug.Log(1, "RX (Session %p): unexpected begin response frame %T", s, fr) - s.conn.deleteSession(s) - if err := s.conn.Close(); err != nil { - return err - } - return &ConnError{inner: fmt.Errorf("unexpected begin response: %#v", fr)} - } - - // start Session multiplexor - go s.mux(begin) - - return nil -} - -// Close closes the session. -// - ctx controls waiting for the peer to acknowledge the session is closed -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. However, the operation will continue to -// execute in the background. Subsequent calls will return a *SessionError -// that contains the context's error message. -func (s *Session) Close(ctx context.Context) error { - var ctxErr error - s.closeOnce.Do(func() { - close(s.close) - - // once the mux has received the ack'ing end performative, the mux will - // exit which deletes the session and closes s.done. - select { - case <-s.done: - s.closeErr = s.doneErr - - case <-ctx.Done(): - // notify the caller that the close timed out/was cancelled. - // the mux will remain running and once the ack is received it will terminate. - ctxErr = ctx.Err() - - // record that the close timed out/was cancelled. 
- // subsequent calls to Close() will return this - debug.Log(1, "TX (Session %p) channel %d: %v", s, s.channel, ctxErr) - s.closeErr = &SessionError{inner: ctxErr} - } - }) - - if ctxErr != nil { - return ctxErr - } - - var sessionErr *SessionError - if errors.As(s.closeErr, &sessionErr) && sessionErr.RemoteErr == nil && sessionErr.inner == nil { - // an empty SessionError means the session was cleanly closed by the caller - return nil - } - return s.closeErr -} - -// txFrame sends a frame to the connWriter. -// - ctx is used to provide the write deadline -// - fr is the frame to write to net.Conn -// - sent is the optional channel that will contain the error if the write fails -func (s *Session) txFrame(ctx context.Context, fr frames.FrameBody, sent chan error) { - debug.Log(2, "TX (Session %p) mux frame to Conn (%p): %s", s, s.conn, fr) - s.conn.sendFrame(ctx, frames.Frame{ - Type: frames.TypeAMQP, - Channel: s.channel, - Body: fr, - }, sent) -} - -// txFrameAndWait sends a frame to the connWriter and waits for the write to complete -// - ctx is used to provide the write deadline -// - fr is the frame to write to net.Conn -func (s *Session) txFrameAndWait(ctx context.Context, fr frames.FrameBody) error { - sent := make(chan error, 1) - s.txFrame(ctx, fr, sent) - - select { - case err := <-sent: - return err - case <-s.conn.done: - return s.conn.doneErr - case <-s.done: - return s.doneErr - } -} - -// NewReceiver opens a new receiver link on the session. -// - ctx controls waiting for the peer to create a sending terminus -// - source is the name of the peer's sending terminus -// - opts contains optional values, pass nil to accept the defaults -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. If the Receiver was successfully -// created, it will be cleaned up in future calls to NewReceiver. 
-func (s *Session) NewReceiver(ctx context.Context, source string, opts *ReceiverOptions) (*Receiver, error) { - r, err := newReceiver(source, s, opts) - if err != nil { - return nil, err - } - if err = r.attach(ctx); err != nil { - return nil, err - } - - go r.mux(receiverTestHooks{}) - - return r, nil -} - -// NewSender opens a new sender link on the session. -// - ctx controls waiting for the peer to create a receiver terminus -// - target is the name of the peer's receiver terminus -// - opts contains optional values, pass nil to accept the defaults -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. If the Sender was successfully -// created, it will be cleaned up in future calls to NewSender. -func (s *Session) NewSender(ctx context.Context, target string, opts *SenderOptions) (*Sender, error) { - l, err := newSender(target, s, opts) - if err != nil { - return nil, err - } - if err = l.attach(ctx); err != nil { - return nil, err - } - - go l.mux(senderTestHooks{}) - - return l, nil -} - -func (s *Session) mux(remoteBegin *frames.PerformBegin) { - defer func() { - if s.doneErr == nil { - s.doneErr = &SessionError{} - } else if connErr := (&ConnError{}); !errors.As(s.doneErr, &connErr) { - // only wrap non-ConnError error types - var amqpErr *Error - if errors.As(s.doneErr, &amqpErr) { - s.doneErr = &SessionError{RemoteErr: amqpErr} - } else { - s.doneErr = &SessionError{inner: s.doneErr} - } - } - // Signal goroutines waiting on the session. 
- close(s.done) - }() - - var ( - links = make(map[uint32]*link) // mapping of remote handles to links - handlesByDeliveryID = make(map[uint32]uint32) // mapping of deliveryIDs to handles - deliveryIDByHandle = make(map[uint32]uint32) // mapping of handles to latest deliveryID - handlesByRemoteDeliveryID = make(map[uint32]uint32) // mapping of remote deliveryID to handles - - settlementByDeliveryID = make(map[uint32]chan encoding.DeliveryState) - - nextDeliveryID uint32 // tracks the next delivery ID for outgoing transfers - - // flow control values - nextOutgoingID uint32 - nextIncomingID = remoteBegin.NextOutgoingID - remoteIncomingWindow = remoteBegin.IncomingWindow - remoteOutgoingWindow = remoteBegin.OutgoingWindow - - closeInProgress bool // indicates the end performative has been sent - ) - - closeWithError := func(e1 *Error, e2 error) { - if closeInProgress { - debug.Log(3, "TX (Session %p): close already pending, discarding %v", s, e1) - return - } - - closeInProgress = true - s.doneErr = e2 - s.txFrame(context.Background(), &frames.PerformEnd{Error: e1}, nil) - close(s.endSent) - } - - for { - txTransfer := s.txTransfer - // disable txTransfer if flow control windows have been exceeded - if remoteIncomingWindow == 0 || s.outgoingWindow == 0 { - debug.Log(1, "TX (Session %p): disabling txTransfer - window exceeded. remoteIncomingWindow: %d outgoingWindow: %d", - s, remoteIncomingWindow, s.outgoingWindow) - txTransfer = nil - } - - tx := s.tx - closed := s.close - if closeInProgress { - // swap out channel so it no longer triggers - closed = nil - - // once the end performative is sent, we're not allowed to send any frames - tx = nil - txTransfer = nil - } - - // notes on client-side closing session - // when session is closed, we must keep the mux running until the ack'ing end performative - // has been received. during this window, the session is allowed to receive frames but cannot - // send them. 
- // client-side close happens either by user calling Session.Close() or due to mux initiated - // close due to a violation of some invariant (see sending &Error{} to s.close). in the case - // that both code paths have been triggered, we must be careful to preserve the error that - // triggered the mux initiated close so it can be surfaced to the caller. - - select { - // conn has completed, exit - case <-s.conn.done: - s.doneErr = s.conn.doneErr - return - - case <-closed: - if closeInProgress { - // a client-side close due to protocol error is in progress - continue - } - // session is being closed by the client - closeInProgress = true - s.txFrame(context.Background(), &frames.PerformEnd{}, nil) - close(s.endSent) - - // incoming frame - case q := <-s.rxQ.Wait(): - fr := *q.Dequeue() - s.rxQ.Release(q) - debug.Log(2, "RX (Session %p): %s", s, fr) - - switch body := fr.(type) { - // Disposition frames can reference transfers from more than one - // link. Send this frame to all of them. 
- case *frames.PerformDisposition: - start := body.First - end := start - if body.Last != nil { - end = *body.Last - } - for deliveryID := start; deliveryID <= end; deliveryID++ { - handles := handlesByDeliveryID - if body.Role == encoding.RoleSender { - handles = handlesByRemoteDeliveryID - } - - handle, ok := handles[deliveryID] - if !ok { - debug.Log(2, "RX (Session %p): role %s: didn't find deliveryID %d in handles map", s, body.Role, deliveryID) - continue - } - delete(handles, deliveryID) - - if body.Settled && body.Role == encoding.RoleReceiver { - // check if settlement confirmation was requested, if so - // confirm by closing channel - if done, ok := settlementByDeliveryID[deliveryID]; ok { - delete(settlementByDeliveryID, deliveryID) - select { - case done <- body.State: - default: - } - close(done) - } - } - - link, ok := links[handle] - if !ok { - closeWithError(&Error{ - Condition: ErrCondUnattachedHandle, - Description: "received disposition frame referencing a handle that's not in use", - }, fmt.Errorf("received disposition frame with unknown link handle %d", handle)) - continue - } - - s.muxFrameToLink(link, fr) - } - continue - case *frames.PerformFlow: - if body.NextIncomingID == nil { - // This is a protocol error: - // "[...] MUST be set if the peer has received - // the begin frame for the session" - closeWithError(&Error{ - Condition: ErrCondNotAllowed, - Description: "next-incoming-id not set after session established", - }, errors.New("protocol error: received flow without next-incoming-id after session established")) - continue - } - - // "When the endpoint receives a flow frame from its peer, - // it MUST update the next-incoming-id directly from the - // next-outgoing-id of the frame, and it MUST update the - // remote-outgoing-window directly from the outgoing-window - // of the frame." 
- nextIncomingID = body.NextOutgoingID - remoteOutgoingWindow = body.OutgoingWindow - - // "The remote-incoming-window is computed as follows: - // - // next-incoming-id(flow) + incoming-window(flow) - next-outgoing-id(endpoint) - // - // If the next-incoming-id field of the flow frame is not set, then remote-incoming-window is computed as follows: - // - // initial-outgoing-id(endpoint) + incoming-window(flow) - next-outgoing-id(endpoint)" - remoteIncomingWindow = body.IncomingWindow - nextOutgoingID - remoteIncomingWindow += *body.NextIncomingID - debug.Log(3, "RX (Session %p): flow - remoteOutgoingWindow: %d remoteIncomingWindow: %d nextOutgoingID: %d", s, remoteOutgoingWindow, remoteIncomingWindow, nextOutgoingID) - - // Send to link if handle is set - if body.Handle != nil { - link, ok := links[*body.Handle] - if !ok { - closeWithError(&Error{ - Condition: ErrCondUnattachedHandle, - Description: "received flow frame referencing a handle that's not in use", - }, fmt.Errorf("received flow frame with unknown link handle %d", body.Handle)) - continue - } - - s.muxFrameToLink(link, fr) - continue - } - - if body.Echo && !closeInProgress { - niID := nextIncomingID - resp := &frames.PerformFlow{ - NextIncomingID: &niID, - IncomingWindow: s.incomingWindow, - NextOutgoingID: nextOutgoingID, - OutgoingWindow: s.outgoingWindow, - } - s.txFrame(context.Background(), resp, nil) - } - - case *frames.PerformAttach: - // On Attach response link should be looked up by name, then added - // to the links map with the remote's handle contained in this - // attach frame. - // - // Note body.Role is the remote peer's role, we reverse for the local key. 
- s.linksMu.RLock() - link, linkOk := s.linksByKey[linkKey{name: body.Name, role: !body.Role}] - s.linksMu.RUnlock() - if !linkOk { - closeWithError(&Error{ - Condition: ErrCondNotAllowed, - Description: "received mismatched attach frame", - }, fmt.Errorf("protocol error: received mismatched attach frame %+v", body)) - continue - } - - link.remoteHandle = body.Handle - links[link.remoteHandle] = link - - s.muxFrameToLink(link, fr) - - case *frames.PerformTransfer: - s.needFlowCount++ - // "Upon receiving a transfer, the receiving endpoint will - // increment the next-incoming-id to match the implicit - // transfer-id of the incoming transfer plus one, as well - // as decrementing the remote-outgoing-window, and MAY - // (depending on policy) decrement its incoming-window." - nextIncomingID++ - // don't loop to intmax - if remoteOutgoingWindow > 0 { - remoteOutgoingWindow-- - } - link, ok := links[body.Handle] - if !ok { - closeWithError(&Error{ - Condition: ErrCondUnattachedHandle, - Description: "received transfer frame referencing a handle that's not in use", - }, fmt.Errorf("received transfer frame with unknown link handle %d", body.Handle)) - continue - } - - s.muxFrameToLink(link, fr) - - // if this message is received unsettled and link rcv-settle-mode == second, add to handlesByRemoteDeliveryID - if !body.Settled && body.DeliveryID != nil && link.receiverSettleMode != nil && *link.receiverSettleMode == ReceiverSettleModeSecond { - debug.Log(1, "RX (Session %p): adding handle to handlesByRemoteDeliveryID. delivery ID: %d", s, *body.DeliveryID) - handlesByRemoteDeliveryID[*body.DeliveryID] = body.Handle - } - - // Update peer's outgoing window if half has been consumed. 
- if s.needFlowCount >= s.incomingWindow/2 && !closeInProgress { - debug.Log(3, "RX (Session %p): channel %d: flow - s.needFlowCount(%d) >= s.incomingWindow(%d)/2\n", s, s.channel, s.needFlowCount, s.incomingWindow) - s.needFlowCount = 0 - nID := nextIncomingID - flow := &frames.PerformFlow{ - NextIncomingID: &nID, - IncomingWindow: s.incomingWindow, - NextOutgoingID: nextOutgoingID, - OutgoingWindow: s.outgoingWindow, - } - s.txFrame(context.Background(), flow, nil) - } - - case *frames.PerformDetach: - link, ok := links[body.Handle] - if !ok { - closeWithError(&Error{ - Condition: ErrCondUnattachedHandle, - Description: "received detach frame referencing a handle that's not in use", - }, fmt.Errorf("received detach frame with unknown link handle %d", body.Handle)) - continue - } - s.muxFrameToLink(link, fr) - - // we received a detach frame and sent it to the link. - // this was either the response to a client-side initiated - // detach or our peer detached us. either way, now that - // the link has processed the frame it's detached so we - // are safe to clean up its state. 
- delete(links, link.remoteHandle) - delete(deliveryIDByHandle, link.handle) - s.deallocateHandle(link) - - case *frames.PerformEnd: - // there are two possibilities: - // - this is the ack to a client-side Close() - // - the peer is ending the session so we must ack - - if closeInProgress { - return - } - - // peer detached us with an error, save it and send the ack - if body.Error != nil { - s.doneErr = body.Error - } - - fr := frames.PerformEnd{} - s.txFrame(context.Background(), &fr, nil) - - // per spec, when end is received, we're no longer allowed to receive frames - return - - default: - debug.Log(1, "RX (Session %p): unexpected frame: %s\n", s, body) - closeWithError(&Error{ - Condition: ErrCondInternalError, - Description: "session received unexpected frame", - }, fmt.Errorf("internal error: unexpected frame %T", body)) - } - - case env := <-txTransfer: - fr := &env.Frame - // record current delivery ID - var deliveryID uint32 - if fr.DeliveryID == &needsDeliveryID { - deliveryID = nextDeliveryID - fr.DeliveryID = &deliveryID - nextDeliveryID++ - deliveryIDByHandle[fr.Handle] = deliveryID - - // add to handleByDeliveryID if not sender-settled - if !fr.Settled { - handlesByDeliveryID[deliveryID] = fr.Handle - } - } else { - // if fr.DeliveryID is nil it must have been added - // to deliveryIDByHandle already - deliveryID = deliveryIDByHandle[fr.Handle] - } - - // log after the delivery ID has been assigned - debug.Log(2, "TX (Session %p): %d, %s", s, s.channel, fr) - - // frame has been sender-settled, remove from map - if fr.Settled { - delete(handlesByDeliveryID, deliveryID) - } - - s.txFrame(env.Ctx, fr, env.Sent) - if sendErr := <-env.Sent; sendErr != nil { - s.doneErr = sendErr - - // put the error back as our sender will read from this channel - env.Sent <- sendErr - return - } - - // if not settled, add done chan to map - if !fr.Settled && fr.Done != nil { - settlementByDeliveryID[deliveryID] = fr.Done - } else if fr.Done != nil { - // 
sender-settled, close done now that the transfer has been sent - close(fr.Done) - } - - // "Upon sending a transfer, the sending endpoint will increment - // its next-outgoing-id, decrement its remote-incoming-window, - // and MAY (depending on policy) decrement its outgoing-window." - nextOutgoingID++ - // don't decrement if we're at 0 or we could loop to int max - if remoteIncomingWindow != 0 { - remoteIncomingWindow-- - } - - case env := <-tx: - fr := env.FrameBody - debug.Log(2, "TX (Session %p): %d, %s", s, s.channel, fr) - switch fr := env.FrameBody.(type) { - case *frames.PerformDisposition: - if fr.Settled && fr.Role == encoding.RoleSender { - // sender with a peer that's in mode second; sending confirmation of disposition. - // disposition frames can reference a range of delivery IDs, although it's highly - // likely in this case there will only be one. - start := fr.First - end := start - if fr.Last != nil { - end = *fr.Last - } - for deliveryID := start; deliveryID <= end; deliveryID++ { - // send delivery state to the channel and close it to signal - // that the delivery has completed. 
- if done, ok := settlementByDeliveryID[deliveryID]; ok { - delete(settlementByDeliveryID, deliveryID) - select { - case done <- fr.State: - default: - } - close(done) - } - } - } - s.txFrame(env.Ctx, fr, env.Sent) - case *frames.PerformFlow: - niID := nextIncomingID - fr.NextIncomingID = &niID - fr.IncomingWindow = s.incomingWindow - fr.NextOutgoingID = nextOutgoingID - fr.OutgoingWindow = s.outgoingWindow - s.txFrame(context.Background(), fr, env.Sent) - case *frames.PerformTransfer: - panic("transfer frames must use txTransfer") - default: - s.txFrame(context.Background(), fr, env.Sent) - } - } - } -} - -func (s *Session) allocateHandle(ctx context.Context, l *link) error { - s.linksMu.Lock() - defer s.linksMu.Unlock() - - // Check if link name already exists, if so then an error should be returned - existing := s.linksByKey[l.key] - if existing != nil { - return fmt.Errorf("link with name '%v' already exists", l.key.name) - } - - next, ok := s.handles.Next() - if !ok { - if err := s.Close(ctx); err != nil { - return err - } - // handle numbers are zero-based, report the actual count - return &SessionError{inner: fmt.Errorf("reached session handle max (%d)", s.handleMax+1)} - } - - l.handle = next // allocate handle to the link - s.linksByKey[l.key] = l // add to mapping - - return nil -} - -func (s *Session) deallocateHandle(l *link) { - s.linksMu.Lock() - defer s.linksMu.Unlock() - - delete(s.linksByKey, l.key) - s.handles.Remove(l.handle) -} - -func (s *Session) abandonLink(l *link) { - s.abandonedLinksMu.Lock() - defer s.abandonedLinksMu.Unlock() - s.abandonedLinks = append(s.abandonedLinks, l) -} - -func (s *Session) freeAbandonedLinks(ctx context.Context) error { - s.abandonedLinksMu.Lock() - defer s.abandonedLinksMu.Unlock() - - debug.Log(3, "TX (Session %p): cleaning up %d abandoned links", s, len(s.abandonedLinks)) - - for _, l := range s.abandonedLinks { - dr := &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - } - if err := 
s.txFrameAndWait(ctx, dr); err != nil { - return err - } - } - - s.abandonedLinks = nil - return nil -} - -func (s *Session) muxFrameToLink(l *link, fr frames.FrameBody) { - q := l.rxQ.Acquire() - q.Enqueue(fr) - l.rxQ.Release(q) - debug.Log(2, "RX (Session %p): mux frame to link (%p): %s, %s", s, l, l.key.name, fr) -} - -// transferEnvelope is used by senders to send transfer frames -type transferEnvelope struct { - Ctx context.Context - Frame frames.PerformTransfer - - // Sent is *never* nil as we use this for confirmation of sending - // NOTE: use a buffered channel of size 1 when populating - Sent chan error -} - -// frameBodyEnvelope is used by senders and receivers to send frames. -type frameBodyEnvelope struct { - Ctx context.Context - FrameBody frames.FrameBody - - // Sent *can* be nil depending on what frame is being sent. - // e.g. sending a disposition frame frame a receiver's settlement - // APIs will have a non-nil channel vs sending a flow frame - // NOTE: use a buffered channel of size 1 when populating - Sent chan error -} - -// the address of this var is a sentinel value indicating -// that a transfer frame is in need of a delivery ID -var needsDeliveryID uint32 diff --git a/sdk/messaging/azeventhubs/internal/links_test.go b/sdk/messaging/azeventhubs/internal/links_test.go index 50d9894bc9a9..40be4df96d74 100644 --- a/sdk/messaging/azeventhubs/internal/links_test.go +++ b/sdk/messaging/azeventhubs/internal/links_test.go @@ -13,8 +13,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/test" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git 
a/sdk/messaging/azeventhubs/internal/links_unit_test.go b/sdk/messaging/azeventhubs/internal/links_unit_test.go index f869135067c6..9f405f4ac011 100644 --- a/sdk/messaging/azeventhubs/internal/links_unit_test.go +++ b/sdk/messaging/azeventhubs/internal/links_unit_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/mock" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/test" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azeventhubs/internal/mock/mock_amqp.go b/sdk/messaging/azeventhubs/internal/mock/mock_amqp.go index 4b67e349957a..400aa972eea2 100644 --- a/sdk/messaging/azeventhubs/internal/mock/mock_amqp.go +++ b/sdk/messaging/azeventhubs/internal/mock/mock_amqp.go @@ -13,7 +13,7 @@ import ( reflect "reflect" amqpwrap "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" - amqp "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + amqp "github.com/Azure/go-amqp" gomock "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azeventhubs/internal/mock/mock_helpers.go b/sdk/messaging/azeventhubs/internal/mock/mock_helpers.go index 32577acb9db5..7ce47cd9c9d2 100644 --- a/sdk/messaging/azeventhubs/internal/mock/mock_helpers.go +++ b/sdk/messaging/azeventhubs/internal/mock/mock_helpers.go @@ -8,7 +8,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" gomock "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azeventhubs/internal/namespace.go b/sdk/messaging/azeventhubs/internal/namespace.go 
index 8f6b8dc34c8d..6fff62906bac 100644 --- a/sdk/messaging/azeventhubs/internal/namespace.go +++ b/sdk/messaging/azeventhubs/internal/namespace.go @@ -18,9 +18,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/utils" + "github.com/Azure/go-amqp" ) var rootUserAgent = telemetry.Format("azeventhubs", Version) diff --git a/sdk/messaging/azeventhubs/internal/namespace_test.go b/sdk/messaging/azeventhubs/internal/namespace_test.go index 13973b73355a..5d0bd2615d74 100644 --- a/sdk/messaging/azeventhubs/internal/namespace_test.go +++ b/sdk/messaging/azeventhubs/internal/namespace_test.go @@ -16,9 +16,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/auth" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/sbauth" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/test" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azeventhubs/internal/rpc.go b/sdk/messaging/azeventhubs/internal/rpc.go index 8d5bf1ff1765..ffad41d6dd15 100644 --- a/sdk/messaging/azeventhubs/internal/rpc.go +++ b/sdk/messaging/azeventhubs/internal/rpc.go @@ -14,7 +14,7 @@ import ( azlog 
"github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) const ( diff --git a/sdk/messaging/azeventhubs/internal/rpc_test.go b/sdk/messaging/azeventhubs/internal/rpc_test.go index cf39bd695c17..44c6affed55e 100644 --- a/sdk/messaging/azeventhubs/internal/rpc_test.go +++ b/sdk/messaging/azeventhubs/internal/rpc_test.go @@ -12,9 +12,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/mock" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/test" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azeventhubs/internal/utils/retrier_test.go b/sdk/messaging/azeventhubs/internal/utils/retrier_test.go index acae2d432949..cc3997bc3b06 100644 --- a/sdk/messaging/azeventhubs/internal/utils/retrier_test.go +++ b/sdk/messaging/azeventhubs/internal/utils/retrier_test.go @@ -14,7 +14,7 @@ import ( azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azeventhubs/mgmt.go b/sdk/messaging/azeventhubs/mgmt.go index 333bf717d2a7..8c31e00bf198 100644 --- a/sdk/messaging/azeventhubs/mgmt.go +++ 
b/sdk/messaging/azeventhubs/mgmt.go @@ -13,7 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/eh" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) // EventHubProperties represents properties of the Event Hub, like the number of partitions. diff --git a/sdk/messaging/azeventhubs/partition_client.go b/sdk/messaging/azeventhubs/partition_client.go index 654099f53016..238cb275babc 100644 --- a/sdk/messaging/azeventhubs/partition_client.go +++ b/sdk/messaging/azeventhubs/partition_client.go @@ -14,7 +14,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) // DefaultConsumerGroup is the name of the default consumer group in the Event Hubs service. 
diff --git a/sdk/messaging/azeventhubs/partition_client_unit_test.go b/sdk/messaging/azeventhubs/partition_client_unit_test.go index cbb570fadd8b..7560dbf7e209 100644 --- a/sdk/messaging/azeventhubs/partition_client_unit_test.go +++ b/sdk/messaging/azeventhubs/partition_client_unit_test.go @@ -10,8 +10,8 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/test" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azeventhubs/processor_load_balancer.go b/sdk/messaging/azeventhubs/processor_load_balancer.go index 99f396cfd652..e708c6dd514b 100644 --- a/sdk/messaging/azeventhubs/processor_load_balancer.go +++ b/sdk/messaging/azeventhubs/processor_load_balancer.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/rand" + "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" @@ -109,7 +110,27 @@ func (lb *processorLoadBalancer) LoadBalance(ctx context.Context, partitionIDs [ } } - return lb.checkpointStore.ClaimOwnership(ctx, ownerships, nil) + actual, err := lb.checkpointStore.ClaimOwnership(ctx, ownerships, nil) + + if err != nil { + return nil, err + } + + if log.Should(EventConsumer) { + log.Writef(EventConsumer, "[%0.5s] Asked for %s, got %s", lb.details.ClientID, partitionsForOwnerships(ownerships), partitionsForOwnerships(actual)) + } + + return actual, nil +} + +func partitionsForOwnerships(all []Ownership) string { + var parts []string + + for _, o := range all { + parts = append(parts, o.PartitionID) + } + + return strings.Join(parts, ",") } // getAvailablePartitions finds all partitions that are either completely unowned _or_ diff --git a/sdk/messaging/azeventhubs/processor_partition_client.go b/sdk/messaging/azeventhubs/processor_partition_client.go index 
da0f4eb402b8..cc52c533da5a 100644 --- a/sdk/messaging/azeventhubs/processor_partition_client.go +++ b/sdk/messaging/azeventhubs/processor_partition_client.go @@ -33,16 +33,19 @@ func (c *ProcessorPartitionClient) ReceiveEvents(ctx context.Context, count int, return c.innerClient.ReceiveEvents(ctx, count, options) } -// UpdateCheckpoint updates the checkpoint store. This ensure that if the Processor is restarted it will -// start from after this point. -func (p *ProcessorPartitionClient) UpdateCheckpoint(ctx context.Context, latestEvent *ReceivedEventData) error { - return p.checkpointStore.UpdateCheckpoint(ctx, Checkpoint{ +// UpdateCheckpoint updates the checkpoint in the CheckpointStore. New Processors will resume after +// this checkpoint for this partition. +func (p *ProcessorPartitionClient) UpdateCheckpoint(ctx context.Context, latestEvent *ReceivedEventData, options *UpdateCheckpointOptions) error { + seq := latestEvent.SequenceNumber + offset := latestEvent.Offset + + return p.checkpointStore.SetCheckpoint(ctx, Checkpoint{ ConsumerGroup: p.consumerClientDetails.ConsumerGroup, EventHubName: p.consumerClientDetails.EventHubName, FullyQualifiedNamespace: p.consumerClientDetails.FullyQualifiedNamespace, PartitionID: p.partitionID, - SequenceNumber: &latestEvent.SequenceNumber, - Offset: latestEvent.Offset, + SequenceNumber: &seq, + Offset: &offset, }, nil) } @@ -63,3 +66,8 @@ func (c *ProcessorPartitionClient) Close(ctx context.Context) error { return nil } + +// UpdateCheckpointOptions contains optional parameters for the [ProcessorPartitionClient.UpdateCheckpoint] function. 
+type UpdateCheckpointOptions struct { + // For future expansion +} diff --git a/sdk/messaging/azeventhubs/processor_test.go b/sdk/messaging/azeventhubs/processor_test.go index 136c3391066a..e08771110439 100644 --- a/sdk/messaging/azeventhubs/processor_test.go +++ b/sdk/messaging/azeventhubs/processor_test.go @@ -95,9 +95,7 @@ func TestProcessor_Contention(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - processor, err := azeventhubs.NewProcessor(consumerClient, checkpointStore, &azeventhubs.ProcessorOptions{ - UpdateInterval: 10 * time.Second, - }) + processor, err := azeventhubs.NewProcessor(consumerClient, checkpointStore, nil) require.NoError(t, err) processors = append(processors, testData{ @@ -142,8 +140,8 @@ func TestProcessor_Contention(t *testing.T) { nextCtx, cancelNext := context.WithCancel(context.Background()) defer cancelNext() - // arbitrary, but basically if we go 20 seconds without a new partition acquisition we're probably balanced. - const idleInterval = 20 * time.Second + // arbitrary interval, we just want to give enough time that things seem balanced. 
+ const idleInterval = 10 * time.Second active := time.AfterFunc(idleInterval, cancelNext) for { @@ -160,7 +158,7 @@ func TestProcessor_Contention(t *testing.T) { active.Reset(time.Minute) } - t.Logf("%s hasn't received a new partition in %sseconds", procStuff.name, idleInterval/time.Second) + t.Logf("%s hasn't received a new partition in %s", procStuff.name, idleInterval) }(client) } @@ -403,7 +401,7 @@ func processEventsForTest(t *testing.T, producerClient *azeventhubs.ProducerClie t.Logf("Updating checkpoint for partition %s", partitionClient.PartitionID()) - if err := partitionClient.UpdateCheckpoint(context.TODO(), events[len(events)-1]); err != nil { + if err := partitionClient.UpdateCheckpoint(context.TODO(), events[len(events)-1], nil); err != nil { return err } diff --git a/sdk/messaging/azeventhubs/processor_unit_test.go b/sdk/messaging/azeventhubs/processor_unit_test.go index ff257b58d408..950ec342d18e 100644 --- a/sdk/messaging/azeventhubs/processor_unit_test.go +++ b/sdk/messaging/azeventhubs/processor_unit_test.go @@ -224,7 +224,7 @@ func TestUnit_Processor_Run_singleConsumerPerPartition(t *testing.T) { func TestUnit_Processor_Run_startPosition(t *testing.T) { cps := newCheckpointStoreForTest() - err := cps.UpdateCheckpoint(context.Background(), Checkpoint{ + err := cps.SetCheckpoint(context.Background(), Checkpoint{ ConsumerGroup: "consumer-group", EventHubName: "event-hub", FullyQualifiedNamespace: "fqdn", @@ -266,7 +266,7 @@ func TestUnit_Processor_Run_startPosition(t *testing.T) { err = partClient.UpdateCheckpoint(context.Background(), &ReceivedEventData{ SequenceNumber: 405, - }) + }, nil) require.NoError(t, err) checkpoints, err = cps.ListCheckpoints(context.Background(), processor.consumerClientDetails.FullyQualifiedNamespace, diff --git a/sdk/messaging/azeventhubs/producer_client.go b/sdk/messaging/azeventhubs/producer_client.go index 4d85956ff2ac..f887e265838b 100644 --- a/sdk/messaging/azeventhubs/producer_client.go +++ 
b/sdk/messaging/azeventhubs/producer_client.go @@ -17,7 +17,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs/internal/go-amqp" + "github.com/Azure/go-amqp" ) // WebSocketConnParams are passed to your web socket creation function (ClientOptions.NewWebSocketConn) diff --git a/sdk/messaging/azservicebus/amqp_message.go b/sdk/messaging/azservicebus/amqp_message.go index c7484257994f..b96eb98d19c4 100644 --- a/sdk/messaging/azservicebus/amqp_message.go +++ b/sdk/messaging/azservicebus/amqp_message.go @@ -6,7 +6,7 @@ package azservicebus import ( "time" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) // AMQPAnnotatedMessage represents the AMQP message, as received from Service Bus. 
diff --git a/sdk/messaging/azservicebus/go.mod b/sdk/messaging/azservicebus/go.mod index 3b70a5f5868e..1c391b1f80c9 100644 --- a/sdk/messaging/azservicebus/go.mod +++ b/sdk/messaging/azservicebus/go.mod @@ -8,6 +8,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0 github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 + github.com/Azure/go-amqp v1.0.0 ) require ( @@ -16,7 +17,7 @@ require ( // used in stress tests github.com/microsoft/ApplicationInsights-Go v0.4.4 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.7.1 // used in examples only nhooyr.io/websocket v1.8.7 @@ -30,7 +31,6 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gofrs/uuid v3.3.0+incompatible // indirect github.com/golang-jwt/jwt v3.2.1+incompatible // indirect - github.com/google/go-cmp v0.5.1 // indirect github.com/google/uuid v1.1.1 // indirect github.com/klauspost/compress v1.10.3 // indirect github.com/kylelemons/godebug v1.1.0 // indirect diff --git a/sdk/messaging/azservicebus/go.sum b/sdk/messaging/azservicebus/go.sum index 69c9fadf8fe8..1cefd2440d19 100644 --- a/sdk/messaging/azservicebus/go.sum +++ b/sdk/messaging/azservicebus/go.sum @@ -6,12 +6,15 @@ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0 h1:Yoicul8bnVdQrhDMTHxdE github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0/go.mod h1:+6sju8gk8FRmSajX3Oz4G5Gm7P+mbqE9FVaXXFYTkCM= github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/go-amqp v1.0.0 h1:QfCugi1M+4F2JDTRgVnRw7PYXLXZ9hmqk3+9+oJh3OA= +github.com/Azure/go-amqp v1.0.0/go.mod h1:+bg0x3ce5+Q3ahCEXnCsGG3ETpDQe3MEVnOuT2ywPwc= 
github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= @@ -42,8 +45,7 @@ github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaW github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -84,8 +86,8 @@ github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/tedsuo/ifrit v0.0.0-20180802180643-bea94bb476cc/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= @@ -129,7 +131,6 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= diff --git a/sdk/messaging/azservicebus/internal/amqpLinks_test.go 
b/sdk/messaging/azservicebus/internal/amqpLinks_test.go index 4b063bc1a3d6..fe1558fb20e1 100644 --- a/sdk/messaging/azservicebus/internal/amqpLinks_test.go +++ b/sdk/messaging/azservicebus/internal/amqpLinks_test.go @@ -15,9 +15,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/utils" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/internal/amqp_test_utils.go b/sdk/messaging/azservicebus/internal/amqp_test_utils.go index ef336e3f2650..20dbf52cb7a7 100644 --- a/sdk/messaging/azservicebus/internal/amqp_test_utils.go +++ b/sdk/messaging/azservicebus/internal/amqp_test_utils.go @@ -11,8 +11,8 @@ import ( azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/utils" + "github.com/Azure/go-amqp" ) type FakeNS struct { diff --git a/sdk/messaging/azservicebus/internal/amqplinks_unit_test.go b/sdk/messaging/azservicebus/internal/amqplinks_unit_test.go index 70e06658f287..dcc7907cd33c 100644 --- a/sdk/messaging/azservicebus/internal/amqplinks_unit_test.go +++ b/sdk/messaging/azservicebus/internal/amqplinks_unit_test.go @@ -13,10 +13,10 @@ import ( 
"github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock/emulation" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/utils" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/internal/amqpwrap/amqpwrap.go b/sdk/messaging/azservicebus/internal/amqpwrap/amqpwrap.go index 6e62d0b383d6..154e6ecb1d09 100644 --- a/sdk/messaging/azservicebus/internal/amqpwrap/amqpwrap.go +++ b/sdk/messaging/azservicebus/internal/amqpwrap/amqpwrap.go @@ -10,7 +10,7 @@ import ( "errors" "time" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) // AMQPReceiver is implemented by *amqp.Receiver diff --git a/sdk/messaging/azservicebus/internal/amqpwrap/mock_amqp_test.go b/sdk/messaging/azservicebus/internal/amqpwrap/mock_amqp_test.go index 0fa5e88b86ac..5970412670e6 100644 --- a/sdk/messaging/azservicebus/internal/amqpwrap/mock_amqp_test.go +++ b/sdk/messaging/azservicebus/internal/amqpwrap/mock_amqp_test.go @@ -12,7 +12,7 @@ import ( context "context" reflect "reflect" - go_amqp "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + go_amqp "github.com/Azure/go-amqp" gomock "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azservicebus/internal/amqpwrap/rpc.go b/sdk/messaging/azservicebus/internal/amqpwrap/rpc.go index 2bb4e75d890e..4804f1176939 100644 --- 
a/sdk/messaging/azservicebus/internal/amqpwrap/rpc.go +++ b/sdk/messaging/azservicebus/internal/amqpwrap/rpc.go @@ -6,7 +6,7 @@ package amqpwrap import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) // RPCResponse is the simplified response structure from an RPC like call diff --git a/sdk/messaging/azservicebus/internal/cbs.go b/sdk/messaging/azservicebus/internal/cbs.go index fd484d93561c..5a43d5e1f7c8 100644 --- a/sdk/messaging/azservicebus/internal/cbs.go +++ b/sdk/messaging/azservicebus/internal/cbs.go @@ -11,7 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/auth" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) const ( diff --git a/sdk/messaging/azservicebus/internal/errors.go b/sdk/messaging/azservicebus/internal/errors.go index b87d0621587a..cdeb63d97581 100644 --- a/sdk/messaging/azservicebus/internal/errors.go +++ b/sdk/messaging/azservicebus/internal/errors.go @@ -15,7 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) type errNonRetriable struct { diff --git a/sdk/messaging/azservicebus/internal/errors_test.go b/sdk/messaging/azservicebus/internal/errors_test.go index 82658a0b60e6..1343417dcfbc 100644 --- a/sdk/messaging/azservicebus/internal/errors_test.go +++ b/sdk/messaging/azservicebus/internal/errors_test.go @@ -15,7 +15,7 @@ import ( 
"github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/internal/exported/error_test.go b/sdk/messaging/azservicebus/internal/exported/error_test.go index 4fe69bb82344..a64d85d8fb22 100644 --- a/sdk/messaging/azservicebus/internal/exported/error_test.go +++ b/sdk/messaging/azservicebus/internal/exported/error_test.go @@ -6,7 +6,7 @@ package exported import ( "testing" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/internal/go-amqp/LICENSE b/sdk/messaging/azservicebus/internal/go-amqp/LICENSE deleted file mode 100644 index 387b3e7e0f3b..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ - MIT License - - Copyright (C) 2017 Kale Blankenship - Portions Copyright (C) Microsoft Corporation - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. 
- - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE diff --git a/sdk/messaging/azservicebus/internal/go-amqp/conn.go b/sdk/messaging/azservicebus/internal/go-amqp/conn.go deleted file mode 100644 index f6e4b808f922..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/conn.go +++ /dev/null @@ -1,1135 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "bytes" - "context" - "crypto/tls" - "errors" - "fmt" - "math" - "net" - "net/url" - "sync" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/bitmap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/frames" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/shared" -) - -// Default connection options -const ( - defaultIdleTimeout = 1 * time.Minute - defaultMaxFrameSize = 65536 - defaultMaxSessions = 65536 - defaultWriteTimeout = 30 * time.Second -) - -// ConnOptions contains the optional settings for configuring an AMQP connection. -type ConnOptions struct { - // ContainerID sets the container-id to use when opening the connection. 
- // - // A container ID will be randomly generated if this option is not used. - ContainerID string - - // HostName sets the hostname sent in the AMQP - // Open frame and TLS ServerName (if not otherwise set). - HostName string - - // IdleTimeout specifies the maximum period between - // receiving frames from the peer. - // - // Specify a value less than zero to disable idle timeout. - // - // Default: 1 minute (60000000000). - IdleTimeout time.Duration - - // MaxFrameSize sets the maximum frame size that - // the connection will accept. - // - // Must be 512 or greater. - // - // Default: 512. - MaxFrameSize uint32 - - // MaxSessions sets the maximum number of channels. - // The value must be greater than zero. - // - // Default: 65535. - MaxSessions uint16 - - // Properties sets an entry in the connection properties map sent to the server. - Properties map[string]any - - // SASLType contains the specified SASL authentication mechanism. - SASLType SASLType - - // TLSConfig sets the tls.Config to be used during - // TLS negotiation. - // - // This option is for advanced usage, in most scenarios - // providing a URL scheme of "amqps://" is sufficient. - TLSConfig *tls.Config - - // WriteTimeout controls the write deadline when writing AMQP frames to the - // underlying net.Conn and no caller provided context.Context is available or - // the context contains no deadline (e.g. context.Background()). - // The timeout is set per write. - // - // Setting to a value less than zero means no timeout is set, so writes - // defer to the underlying behavior of net.Conn with no write deadline. - // - // Default: 30s - WriteTimeout time.Duration - - // test hook - dialer dialer -} - -// Dial connects to an AMQP server. -// -// If the addr includes a scheme, it must be "amqp", "amqps", or "amqp+ssl". -// If no port is provided, 5672 will be used for "amqp" and 5671 for "amqps" or "amqp+ssl". 
-// -// If username and password information is not empty it's used as SASL PLAIN -// credentials, equal to passing ConnSASLPlain option. -// -// opts: pass nil to accept the default values. -func Dial(ctx context.Context, addr string, opts *ConnOptions) (*Conn, error) { - c, err := dialConn(ctx, addr, opts) - if err != nil { - return nil, err - } - err = c.start(ctx) - if err != nil { - return nil, err - } - return c, nil -} - -// NewConn establishes a new AMQP client connection over conn. -// opts: pass nil to accept the default values. -func NewConn(ctx context.Context, conn net.Conn, opts *ConnOptions) (*Conn, error) { - c, err := newConn(conn, opts) - if err != nil { - return nil, err - } - err = c.start(ctx) - if err != nil { - return nil, err - } - return c, nil -} - -// Conn is an AMQP connection. -type Conn struct { - net net.Conn // underlying connection - dialer dialer // used for testing purposes, it allows faking dialing TCP/TLS endpoints - writeTimeout time.Duration // controls write deadline in absense of a context - - // TLS - tlsNegotiation bool // negotiate TLS - tlsComplete bool // TLS negotiation complete - tlsConfig *tls.Config // TLS config, default used if nil (ServerName set to Client.hostname) - - // SASL - saslHandlers map[encoding.Symbol]stateFunc // map of supported handlers keyed by SASL mechanism, SASL not negotiated if nil - saslComplete bool // SASL negotiation complete; internal *except* for SASL auth methods - - // local settings - maxFrameSize uint32 // max frame size to accept - channelMax uint16 // maximum number of channels to allow - hostname string // hostname of remote server (set explicitly or parsed from URL) - idleTimeout time.Duration // maximum period between receiving frames - properties map[encoding.Symbol]any // additional properties sent upon connection open - containerID string // set explicitly or randomly generated - - // peer settings - peerIdleTimeout time.Duration // maximum period between sending frames - 
peerMaxFrameSize uint32 // maximum frame size peer will accept - - // conn state - done chan struct{} // indicates the connection has terminated - doneErr error // contains the error state returned from Close(); DO NOT TOUCH outside of conn.go until done has been closed! - - // connReader and connWriter management - rxtxExit chan struct{} // signals connReader and connWriter to exit - closeOnce sync.Once // ensures that close() is only called once - - // session tracking - channels *bitmap.Bitmap - sessionsByChannel map[uint16]*Session - sessionsByChannelMu sync.RWMutex - - abandonedSessionsMu sync.Mutex - abandonedSessions []*Session - - // connReader - rxBuf buffer.Buffer // incoming bytes buffer - rxDone chan struct{} // closed when connReader exits - rxErr error // contains last error reading from c.net; DO NOT TOUCH outside of connReader until rxDone has been closed! - - // connWriter - txFrame chan frameEnvelope // AMQP frames to be sent by connWriter - txBuf buffer.Buffer // buffer for marshaling frames before transmitting - txDone chan struct{} // closed when connWriter exits - txErr error // contains last error writing to c.net; DO NOT TOUCH outside of connWriter until txDone has been closed! 
-} - -// used to abstract the underlying dialer for testing purposes -type dialer interface { - NetDialerDial(ctx context.Context, c *Conn, host, port string) error - TLSDialWithDialer(ctx context.Context, c *Conn, host, port string) error -} - -// implements the dialer interface -type defaultDialer struct{} - -func (defaultDialer) NetDialerDial(ctx context.Context, c *Conn, host, port string) (err error) { - dialer := &net.Dialer{} - c.net, err = dialer.DialContext(ctx, "tcp", net.JoinHostPort(host, port)) - return -} - -func (defaultDialer) TLSDialWithDialer(ctx context.Context, c *Conn, host, port string) (err error) { - dialer := &tls.Dialer{Config: c.tlsConfig} - c.net, err = dialer.DialContext(ctx, "tcp", net.JoinHostPort(host, port)) - return -} - -func dialConn(ctx context.Context, addr string, opts *ConnOptions) (*Conn, error) { - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - host, port := u.Hostname(), u.Port() - if port == "" { - port = "5672" - if u.Scheme == "amqps" || u.Scheme == "amqp+ssl" { - port = "5671" - } - } - - var cp ConnOptions - if opts != nil { - cp = *opts - } - - // prepend SASL credentials when the user/pass segment is not empty - if u.User != nil { - pass, _ := u.User.Password() - cp.SASLType = SASLTypePlain(u.User.Username(), pass) - } - - if cp.HostName == "" { - cp.HostName = host - } - - c, err := newConn(nil, &cp) - if err != nil { - return nil, err - } - - switch u.Scheme { - case "amqp", "": - err = c.dialer.NetDialerDial(ctx, c, host, port) - case "amqps", "amqp+ssl": - c.initTLSConfig() - c.tlsNegotiation = false - err = c.dialer.TLSDialWithDialer(ctx, c, host, port) - default: - err = fmt.Errorf("unsupported scheme %q", u.Scheme) - } - - if err != nil { - return nil, err - } - return c, nil -} - -func newConn(netConn net.Conn, opts *ConnOptions) (*Conn, error) { - c := &Conn{ - dialer: defaultDialer{}, - net: netConn, - maxFrameSize: defaultMaxFrameSize, - peerMaxFrameSize: defaultMaxFrameSize, - 
channelMax: defaultMaxSessions - 1, // -1 because channel-max starts at zero - idleTimeout: defaultIdleTimeout, - containerID: shared.RandString(40), - done: make(chan struct{}), - rxtxExit: make(chan struct{}), - rxDone: make(chan struct{}), - txFrame: make(chan frameEnvelope), - txDone: make(chan struct{}), - sessionsByChannel: map[uint16]*Session{}, - writeTimeout: defaultWriteTimeout, - } - - // apply options - if opts == nil { - opts = &ConnOptions{} - } - - if opts.WriteTimeout > 0 { - c.writeTimeout = opts.WriteTimeout - } else if opts.WriteTimeout < 0 { - c.writeTimeout = 0 - } - if opts.ContainerID != "" { - c.containerID = opts.ContainerID - } - if opts.HostName != "" { - c.hostname = opts.HostName - } - if opts.IdleTimeout > 0 { - c.idleTimeout = opts.IdleTimeout - } else if opts.IdleTimeout < 0 { - c.idleTimeout = 0 - } - if opts.MaxFrameSize > 0 && opts.MaxFrameSize < 512 { - return nil, fmt.Errorf("invalid MaxFrameSize value %d", opts.MaxFrameSize) - } else if opts.MaxFrameSize > 512 { - c.maxFrameSize = opts.MaxFrameSize - } - if opts.MaxSessions > 0 { - c.channelMax = opts.MaxSessions - } - if opts.SASLType != nil { - if err := opts.SASLType(c); err != nil { - return nil, err - } - } - if opts.Properties != nil { - c.properties = make(map[encoding.Symbol]any) - for key, val := range opts.Properties { - c.properties[encoding.Symbol(key)] = val - } - } - if opts.TLSConfig != nil { - c.tlsConfig = opts.TLSConfig.Clone() - } - if opts.dialer != nil { - c.dialer = opts.dialer - } - return c, nil -} - -func (c *Conn) initTLSConfig() { - // create a new config if not already set - if c.tlsConfig == nil { - c.tlsConfig = new(tls.Config) - } - - // TLS config must have ServerName or InsecureSkipVerify set - if c.tlsConfig.ServerName == "" && !c.tlsConfig.InsecureSkipVerify { - c.tlsConfig.ServerName = c.hostname - } -} - -// start establishes the connection and begins multiplexing network IO. 
-// It is an error to call Start() on a connection that's been closed. -func (c *Conn) start(ctx context.Context) (err error) { - // if the context has a deadline or is cancellable, start the interruptor goroutine. - // this will close the underlying net.Conn in response to the context. - - if ctx.Done() != nil { - done := make(chan struct{}) - interruptRes := make(chan error, 1) - - defer func() { - close(done) - if ctxErr := <-interruptRes; ctxErr != nil { - // return context error to caller - err = ctxErr - } - }() - - go func() { - select { - case <-ctx.Done(): - c.closeDuringStart() - interruptRes <- ctx.Err() - case <-done: - interruptRes <- nil - } - }() - } - - if err = c.startImpl(ctx); err != nil { - return err - } - - // we can't create the channel bitmap until the connection has been established. - // this is because our peer can tell us the max channels they support. - c.channels = bitmap.New(uint32(c.channelMax)) - - go c.connWriter() - go c.connReader() - - return -} - -func (c *Conn) startImpl(ctx context.Context) error { - // set connection establishment deadline as required - if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { - _ = c.net.SetDeadline(deadline) - - // remove connection establishment deadline - defer func() { - _ = c.net.SetDeadline(time.Time{}) - }() - } - - // run connection establishment state machine - for state := c.negotiateProto; state != nil; { - var err error - state, err = state(ctx) - // check if err occurred - if err != nil { - c.closeDuringStart() - return err - } - } - - return nil -} - -// Close closes the connection. -func (c *Conn) Close() error { - c.close() - - // wait until the reader/writer goroutines have exited before proceeding. - // this is to prevent a race between calling Close() and a reader/writer - // goroutine calling close() due to a terminal error. 
- <-c.txDone - <-c.rxDone - - var connErr *ConnError - if errors.As(c.doneErr, &connErr) && connErr.RemoteErr == nil && connErr.inner == nil { - // an empty ConnectionError means the connection was closed by the caller - return nil - } - - // there was an error during shut-down or connReader/connWriter - // experienced a terminal error - return c.doneErr -} - -// close is called once, either from Close() or when connReader/connWriter exits -func (c *Conn) close() { - c.closeOnce.Do(func() { - defer close(c.done) - - close(c.rxtxExit) - - // wait for writing to stop, allows it to send the final close frame - <-c.txDone - - closeErr := c.net.Close() - - // check rxDone after closing net, otherwise may block - // for up to c.idleTimeout - <-c.rxDone - - if errors.Is(c.rxErr, net.ErrClosed) { - // this is the expected error when the connection is closed, swallow it - c.rxErr = nil - } - - if c.txErr == nil && c.rxErr == nil && closeErr == nil { - // if there are no errors, it means user initiated close() and we shut down cleanly - c.doneErr = &ConnError{} - } else if amqpErr, ok := c.rxErr.(*Error); ok { - // we experienced a peer-initiated close that contained an Error. return it - c.doneErr = &ConnError{RemoteErr: amqpErr} - } else if c.txErr != nil { - // c.txErr is already wrapped in a ConnError - c.doneErr = c.txErr - } else if c.rxErr != nil { - c.doneErr = &ConnError{inner: c.rxErr} - } else { - c.doneErr = &ConnError{inner: closeErr} - } - }) -} - -// closeDuringStart is a special close to be used only during startup (i.e. c.start() and any of its children) -func (c *Conn) closeDuringStart() { - c.closeOnce.Do(func() { - c.net.Close() - }) -} - -// NewSession starts a new session on the connection. -// - ctx controls waiting for the peer to acknowledge the session -// - opts contains optional values, pass nil to accept the defaults -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. 
If the Session was successfully -// created, it will be cleaned up in future calls to NewSession. -func (c *Conn) NewSession(ctx context.Context, opts *SessionOptions) (*Session, error) { - // clean up any abandoned sessions first - if err := c.freeAbandonedSessions(ctx); err != nil { - return nil, err - } - - session, err := c.newSession(opts) - if err != nil { - return nil, err - } - - if err := session.begin(ctx); err != nil { - c.abandonSession(session) - return nil, err - } - - return session, nil -} - -func (c *Conn) freeAbandonedSessions(ctx context.Context) error { - c.abandonedSessionsMu.Lock() - defer c.abandonedSessionsMu.Unlock() - - debug.Log(3, "TX (Conn %p): cleaning up %d abandoned sessions", c, len(c.abandonedSessions)) - - for _, s := range c.abandonedSessions { - fr := frames.PerformEnd{} - if err := s.txFrameAndWait(ctx, &fr); err != nil { - return err - } - } - - c.abandonedSessions = nil - return nil -} - -func (c *Conn) newSession(opts *SessionOptions) (*Session, error) { - c.sessionsByChannelMu.Lock() - defer c.sessionsByChannelMu.Unlock() - - // create the next session to allocate - // note that channel always start at 0 - channel, ok := c.channels.Next() - if !ok { - if err := c.Close(); err != nil { - return nil, err - } - return nil, &ConnError{inner: fmt.Errorf("reached connection channel max (%d)", c.channelMax)} - } - session := newSession(c, uint16(channel), opts) - c.sessionsByChannel[session.channel] = session - - return session, nil -} - -func (c *Conn) deleteSession(s *Session) { - c.sessionsByChannelMu.Lock() - defer c.sessionsByChannelMu.Unlock() - - delete(c.sessionsByChannel, s.channel) - c.channels.Remove(uint32(s.channel)) -} - -func (c *Conn) abandonSession(s *Session) { - c.abandonedSessionsMu.Lock() - defer c.abandonedSessionsMu.Unlock() - c.abandonedSessions = append(c.abandonedSessions, s) -} - -// connReader reads from the net.Conn, decodes frames, and either handles -// them here as appropriate or sends them to the 
session.rx channel. -func (c *Conn) connReader() { - defer func() { - close(c.rxDone) - c.close() - }() - - var sessionsByRemoteChannel = make(map[uint16]*Session) - var err error - for { - if err != nil { - debug.Log(1, "RX (connReader %p): terminal error: %v", c, err) - c.rxErr = err - return - } - - var fr frames.Frame - fr, err = c.readFrame() - if err != nil { - continue - } - - debug.Log(1, "RX (connReader %p): %s", c, fr) - - var ( - session *Session - ok bool - ) - - switch body := fr.Body.(type) { - // Server initiated close. - case *frames.PerformClose: - // connWriter will send the close performative ack on its way out. - // it's a SHOULD though, not a MUST. - if body.Error == nil { - return - } - err = body.Error - continue - - // RemoteChannel should be used when frame is Begin - case *frames.PerformBegin: - if body.RemoteChannel == nil { - // since we only support remotely-initiated sessions, this is an error - // TODO: it would be ideal to not have this kill the connection - err = fmt.Errorf("%T: nil RemoteChannel", fr.Body) - continue - } - c.sessionsByChannelMu.RLock() - session, ok = c.sessionsByChannel[*body.RemoteChannel] - c.sessionsByChannelMu.RUnlock() - if !ok { - // this can happen if NewSession() exits due to the context expiring/cancelled - // before the begin ack is received. - err = fmt.Errorf("unexpected remote channel number %d", *body.RemoteChannel) - continue - } - - session.remoteChannel = fr.Channel - sessionsByRemoteChannel[fr.Channel] = session - - case *frames.PerformEnd: - session, ok = sessionsByRemoteChannel[fr.Channel] - if !ok { - err = fmt.Errorf("%T: didn't find channel %d in sessionsByRemoteChannel (PerformEnd)", fr.Body, fr.Channel) - continue - } - // we MUST remove the remote channel from our map as soon as we receive - // the ack (i.e. before passing it on to the session mux) on the session - // ending since the numbers are recycled. 
- delete(sessionsByRemoteChannel, fr.Channel) - c.deleteSession(session) - - default: - // pass on performative to the correct session - session, ok = sessionsByRemoteChannel[fr.Channel] - if !ok { - err = fmt.Errorf("%T: didn't find channel %d in sessionsByRemoteChannel", fr.Body, fr.Channel) - continue - } - } - - q := session.rxQ.Acquire() - q.Enqueue(fr.Body) - session.rxQ.Release(q) - debug.Log(2, "RX (connReader %p): mux frame to Session (%p): %s", c, session, fr) - } -} - -// readFrame reads a complete frame from c.net. -// it assumes that any read deadline has already been applied. -// used externally by SASL only. -func (c *Conn) readFrame() (frames.Frame, error) { - switch { - // Cheaply reuse free buffer space when fully read. - case c.rxBuf.Len() == 0: - c.rxBuf.Reset() - - // Prevent excessive/unbounded growth by shifting data to beginning of buffer. - case int64(c.rxBuf.Size()) > int64(c.maxFrameSize): - c.rxBuf.Reclaim() - } - - var ( - currentHeader frames.Header // keep track of the current header, for frames split across multiple TCP packets - frameInProgress bool // true if in the middle of receiving data for currentHeader - ) - - for { - // need to read more if buf doesn't contain the complete frame - // or there's not enough in buf to parse the header - if frameInProgress || c.rxBuf.Len() < frames.HeaderSize { - // we MUST reset the idle timeout before each read from net.Conn - if c.idleTimeout > 0 { - _ = c.net.SetReadDeadline(time.Now().Add(c.idleTimeout)) - } - err := c.rxBuf.ReadFromOnce(c.net) - if err != nil { - return frames.Frame{}, err - } - } - - // read more if buf doesn't contain enough to parse the header - if c.rxBuf.Len() < frames.HeaderSize { - continue - } - - // parse the header if a frame isn't in progress - if !frameInProgress { - var err error - currentHeader, err = frames.ParseHeader(&c.rxBuf) - if err != nil { - return frames.Frame{}, err - } - frameInProgress = true - } - - // check size is reasonable - if 
currentHeader.Size > math.MaxInt32 { // make max size configurable - return frames.Frame{}, errors.New("payload too large") - } - - bodySize := int64(currentHeader.Size - frames.HeaderSize) - - // the full frame hasn't been received, keep reading - if int64(c.rxBuf.Len()) < bodySize { - continue - } - frameInProgress = false - - // check if body is empty (keepalive) - if bodySize == 0 { - debug.Log(3, "RX (connReader %p): received keep-alive frame", c) - continue - } - - // parse the frame - b, ok := c.rxBuf.Next(bodySize) - if !ok { - return frames.Frame{}, fmt.Errorf("buffer EOF; requested bytes: %d, actual size: %d", bodySize, c.rxBuf.Len()) - } - - parsedBody, err := frames.ParseBody(buffer.New(b)) - if err != nil { - return frames.Frame{}, err - } - - return frames.Frame{Channel: currentHeader.Channel, Body: parsedBody}, nil - } -} - -// frameEnvelope is used when sending a frame to connWriter to be written to net.Conn -type frameEnvelope struct { - Ctx context.Context - Frame frames.Frame - - // optional channel that is closed on successful write to net.Conn or contains the write error - // NOTE: use a buffered channel of size 1 when populating - Sent chan error -} - -func (c *Conn) connWriter() { - defer func() { - close(c.txDone) - c.close() - }() - - var ( - // keepalives are sent at a rate of 1/2 idle timeout - keepaliveInterval = c.peerIdleTimeout / 2 - // 0 disables keepalives - keepalivesEnabled = keepaliveInterval > 0 - // set if enable, nil if not; nil channels block forever - keepalive <-chan time.Time - ) - - if keepalivesEnabled { - ticker := time.NewTicker(keepaliveInterval) - defer ticker.Stop() - keepalive = ticker.C - } - - var err error - for { - if err != nil { - debug.Log(1, "TX (connWriter %p): terminal error: %v", c, err) - c.txErr = err - return - } - - select { - // frame write request - case env := <-c.txFrame: - timeout, ctxErr := c.getWriteTimeout(env.Ctx) - if ctxErr != nil { - debug.Log(1, "TX (connWriter %p) deadline exceeded: 
%s", c, env.Frame) - if env.Sent != nil { - env.Sent <- ctxErr - } - continue - } - - debug.Log(1, "TX (connWriter %p) timeout %s: %s", c, timeout, env.Frame) - err = c.writeFrame(timeout, env.Frame) - if env.Sent != nil { - if err == nil { - close(env.Sent) - } else { - env.Sent <- err - } - } - - // keepalive timer - case <-keepalive: - debug.Log(3, "TX (connWriter %p): sending keep-alive frame", c) - _ = c.net.SetWriteDeadline(time.Now().Add(c.writeTimeout)) - if _, err = c.net.Write(keepaliveFrame); err != nil { - err = &ConnError{inner: err} - } - // It would be slightly more efficient in terms of network - // resources to reset the timer each time a frame is sent. - // However, keepalives are small (8 bytes) and the interval - // is usually on the order of minutes. It does not seem - // worth it to add extra operations in the write path to - // avoid. (To properly reset a timer it needs to be stopped, - // possibly drained, then reset.) - - // connection complete - case <-c.rxtxExit: - // send close performative. note that the spec says we - // SHOULD wait for the ack but we don't HAVE to, in order - // to be resilient to bad actors etc. so we just send - // the close performative and exit. - fr := frames.Frame{ - Type: frames.TypeAMQP, - Body: &frames.PerformClose{}, - } - debug.Log(1, "TX (connWriter %p): %s", c, fr) - c.txErr = c.writeFrame(c.writeTimeout, fr) - return - } - } -} - -// writeFrame writes a frame to the network. -// used externally by SASL only. -// - timeout - the write deadline to set. zero means no deadline -// -// errors are wrapped in a ConnError as they can be returned to outside callers. 
-func (c *Conn) writeFrame(timeout time.Duration, fr frames.Frame) error { - // writeFrame into txBuf - c.txBuf.Reset() - err := frames.Write(&c.txBuf, fr) - if err != nil { - return &ConnError{inner: err} - } - - // validate the frame isn't exceeding peer's max frame size - requiredFrameSize := c.txBuf.Len() - if uint64(requiredFrameSize) > uint64(c.peerMaxFrameSize) { - return &ConnError{inner: fmt.Errorf("%T frame size %d larger than peer's max frame size %d", fr, requiredFrameSize, c.peerMaxFrameSize)} - } - - if timeout == 0 { - _ = c.net.SetWriteDeadline(time.Time{}) - } else if timeout > 0 { - _ = c.net.SetWriteDeadline(time.Now().Add(timeout)) - } - - // write to network - n, err := c.net.Write(c.txBuf.Bytes()) - if l := c.txBuf.Len(); n > 0 && n < l && err != nil { - debug.Log(1, "TX (writeFrame %p): wrote %d bytes less than len %d: %v", c, n, l, err) - } - if err != nil { - err = &ConnError{inner: err} - } - return err -} - -// writeProtoHeader writes an AMQP protocol header to the -// network -func (c *Conn) writeProtoHeader(pID protoID) error { - _, err := c.net.Write([]byte{'A', 'M', 'Q', 'P', byte(pID), 1, 0, 0}) - return err -} - -// keepaliveFrame is an AMQP frame with no body, used for keepalives -var keepaliveFrame = []byte{0x00, 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00} - -// SendFrame is used by sessions and links to send frames across the network. -// - ctx is used to provide the write deadline -// - fr is the frame to write to net.Conn -// - sent is the optional channel that will contain the error if the write fails -func (c *Conn) sendFrame(ctx context.Context, fr frames.Frame, sent chan error) { - select { - case c.txFrame <- frameEnvelope{Ctx: ctx, Frame: fr, Sent: sent}: - debug.Log(2, "TX (Conn %p): mux frame to connWriter: %s", c, fr) - case <-c.done: - if sent != nil { - sent <- c.doneErr - } - } -} - -// stateFunc is a state in a state machine. -// -// The state is advanced by returning the next state. 
-// The state machine concludes when nil is returned. -type stateFunc func(context.Context) (stateFunc, error) - -// negotiateProto determines which proto to negotiate next. -// used externally by SASL only. -func (c *Conn) negotiateProto(ctx context.Context) (stateFunc, error) { - // in the order each must be negotiated - switch { - case c.tlsNegotiation && !c.tlsComplete: - return c.exchangeProtoHeader(protoTLS) - case c.saslHandlers != nil && !c.saslComplete: - return c.exchangeProtoHeader(protoSASL) - default: - return c.exchangeProtoHeader(protoAMQP) - } -} - -type protoID uint8 - -// protocol IDs received in protoHeaders -const ( - protoAMQP protoID = 0x0 - protoTLS protoID = 0x2 - protoSASL protoID = 0x3 -) - -// exchangeProtoHeader performs the round trip exchange of protocol -// headers, validation, and returns the protoID specific next state. -func (c *Conn) exchangeProtoHeader(pID protoID) (stateFunc, error) { - // write the proto header - if err := c.writeProtoHeader(pID); err != nil { - return nil, err - } - - // read response header - p, err := c.readProtoHeader() - if err != nil { - return nil, err - } - - if pID != p.ProtoID { - return nil, fmt.Errorf("unexpected protocol header %#00x, expected %#00x", p.ProtoID, pID) - } - - // go to the proto specific state - switch pID { - case protoAMQP: - return c.openAMQP, nil - case protoTLS: - return c.startTLS, nil - case protoSASL: - return c.negotiateSASL, nil - default: - return nil, fmt.Errorf("unknown protocol ID %#02x", p.ProtoID) - } -} - -// readProtoHeader reads a protocol header packet from c.rxProto. -func (c *Conn) readProtoHeader() (protoHeader, error) { - const protoHeaderSize = 8 - - // only read from the network once our buffer has been exhausted. - // TODO: this preserves existing behavior as some tests rely on this - // implementation detail (it lets you replay a stream of bytes). 
we - // might want to consider removing this and fixing the tests as the - // protocol doesn't actually work this way. - if c.rxBuf.Len() == 0 { - for { - err := c.rxBuf.ReadFromOnce(c.net) - if err != nil { - return protoHeader{}, err - } - - // read more if buf doesn't contain enough to parse the header - if c.rxBuf.Len() >= protoHeaderSize { - break - } - } - } - - buf, ok := c.rxBuf.Next(protoHeaderSize) - if !ok { - return protoHeader{}, errors.New("invalid protoHeader") - } - // bounds check hint to compiler; see golang.org/issue/14808 - _ = buf[protoHeaderSize-1] - - if !bytes.Equal(buf[:4], []byte{'A', 'M', 'Q', 'P'}) { - return protoHeader{}, fmt.Errorf("unexpected protocol %q", buf[:4]) - } - - p := protoHeader{ - ProtoID: protoID(buf[4]), - Major: buf[5], - Minor: buf[6], - Revision: buf[7], - } - - if p.Major != 1 || p.Minor != 0 || p.Revision != 0 { - return protoHeader{}, fmt.Errorf("unexpected protocol version %d.%d.%d", p.Major, p.Minor, p.Revision) - } - - return p, nil -} - -// startTLS wraps the conn with TLS and returns to Client.negotiateProto -func (c *Conn) startTLS(ctx context.Context) (stateFunc, error) { - c.initTLSConfig() - - _ = c.net.SetReadDeadline(time.Time{}) // clear timeout - - // wrap existing net.Conn and perform TLS handshake - tlsConn := tls.Client(c.net, c.tlsConfig) - if err := tlsConn.HandshakeContext(ctx); err != nil { - return nil, err - } - - // swap net.Conn - c.net = tlsConn - c.tlsComplete = true - - // go to next protocol - return c.negotiateProto, nil -} - -// openAMQP round trips the AMQP open performative -func (c *Conn) openAMQP(ctx context.Context) (stateFunc, error) { - // send open frame - open := &frames.PerformOpen{ - ContainerID: c.containerID, - Hostname: c.hostname, - MaxFrameSize: c.maxFrameSize, - ChannelMax: c.channelMax, - IdleTimeout: c.idleTimeout / 2, // per spec, advertise half our idle timeout - Properties: c.properties, - } - fr := frames.Frame{ - Type: frames.TypeAMQP, - Body: open, - Channel: 
0, - } - debug.Log(1, "TX (openAMQP %p): %s", c, fr) - timeout, err := c.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - if err = c.writeFrame(timeout, fr); err != nil { - return nil, err - } - - // get the response - fr, err = c.readSingleFrame() - if err != nil { - return nil, err - } - debug.Log(1, "RX (openAMQP %p): %s", c, fr) - o, ok := fr.Body.(*frames.PerformOpen) - if !ok { - return nil, fmt.Errorf("openAMQP: unexpected frame type %T", fr.Body) - } - - // update peer settings - if o.MaxFrameSize > 0 { - c.peerMaxFrameSize = o.MaxFrameSize - } - if o.IdleTimeout > 0 { - // TODO: reject very small idle timeouts - c.peerIdleTimeout = o.IdleTimeout - } - if o.ChannelMax < c.channelMax { - c.channelMax = o.ChannelMax - } - - // connection established, exit state machine - return nil, nil -} - -// negotiateSASL returns the SASL handler for the first matched -// mechanism specified by the server -func (c *Conn) negotiateSASL(context.Context) (stateFunc, error) { - // read mechanisms frame - fr, err := c.readSingleFrame() - if err != nil { - return nil, err - } - debug.Log(1, "RX (negotiateSASL %p): %s", c, fr) - sm, ok := fr.Body.(*frames.SASLMechanisms) - if !ok { - return nil, fmt.Errorf("negotiateSASL: unexpected frame type %T", fr.Body) - } - - // return first match in c.saslHandlers based on order received - for _, mech := range sm.Mechanisms { - if state, ok := c.saslHandlers[mech]; ok { - return state, nil - } - } - - // no match - return nil, fmt.Errorf("no supported auth mechanism (%v)", sm.Mechanisms) // TODO: send "auth not supported" frame? -} - -// saslOutcome processes the SASL outcome frame and return Client.negotiateProto -// on success. -// -// SASL handlers return this stateFunc when the mechanism specific negotiation -// has completed. -// used externally by SASL only. 
-func (c *Conn) saslOutcome(context.Context) (stateFunc, error) { - // read outcome frame - fr, err := c.readSingleFrame() - if err != nil { - return nil, err - } - debug.Log(1, "RX (saslOutcome %p): %s", c, fr) - so, ok := fr.Body.(*frames.SASLOutcome) - if !ok { - return nil, fmt.Errorf("saslOutcome: unexpected frame type %T", fr.Body) - } - - // check if auth succeeded - if so.Code != encoding.CodeSASLOK { - return nil, fmt.Errorf("SASL PLAIN auth failed with code %#00x: %s", so.Code, so.AdditionalData) // implement Stringer for so.Code - } - - // return to c.negotiateProto - c.saslComplete = true - return c.negotiateProto, nil -} - -// readSingleFrame is used during connection establishment to read a single frame. -// -// After setup, conn.connReader handles incoming frames. -func (c *Conn) readSingleFrame() (frames.Frame, error) { - fr, err := c.readFrame() - if err != nil { - return frames.Frame{}, err - } - - return fr, nil -} - -// getWriteTimeout returns the timeout as calculated from the context's deadline -// or the default write timeout if the context has no deadline. -// if the context has timed out or was cancelled, an error is returned. 
-func (c *Conn) getWriteTimeout(ctx context.Context) (time.Duration, error) { - if deadline, ok := ctx.Deadline(); ok { - until := time.Until(deadline) - if until <= 0 { - return 0, context.DeadlineExceeded - } - return until, nil - } - return c.writeTimeout, nil -} - -type protoHeader struct { - ProtoID protoID - Major uint8 - Minor uint8 - Revision uint8 -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/const.go b/sdk/messaging/azservicebus/internal/go-amqp/const.go deleted file mode 100644 index 70a2aeff6b61..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/const.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" - -// Sender Settlement Modes -const ( - // Sender will send all deliveries initially unsettled to the receiver. - SenderSettleModeUnsettled SenderSettleMode = encoding.SenderSettleModeUnsettled - - // Sender will send all deliveries settled to the receiver. - SenderSettleModeSettled SenderSettleMode = encoding.SenderSettleModeSettled - - // Sender MAY send a mixture of settled and unsettled deliveries to the receiver. - SenderSettleModeMixed SenderSettleMode = encoding.SenderSettleModeMixed -) - -// SenderSettleMode specifies how the sender will settle messages. -type SenderSettleMode = encoding.SenderSettleMode - -func senderSettleModeValue(m *SenderSettleMode) SenderSettleMode { - if m == nil { - return SenderSettleModeMixed - } - return *m -} - -// Receiver Settlement Modes -const ( - // Receiver is the first to consider the message as settled. - // Once the corresponding disposition frame is sent, the message - // is considered to be settled. - ReceiverSettleModeFirst ReceiverSettleMode = encoding.ReceiverSettleModeFirst - - // Receiver is the second to consider the message as settled. 
- // Once the corresponding disposition frame is sent, the settlement - // is considered in-flight and the message will not be considered as - // settled until the sender replies acknowledging the settlement. - ReceiverSettleModeSecond ReceiverSettleMode = encoding.ReceiverSettleModeSecond -) - -// ReceiverSettleMode specifies how the receiver will settle messages. -type ReceiverSettleMode = encoding.ReceiverSettleMode - -func receiverSettleModeValue(m *ReceiverSettleMode) ReceiverSettleMode { - if m == nil { - return ReceiverSettleModeFirst - } - return *m -} - -// Durability Policies -const ( - // No terminus state is retained durably. - DurabilityNone Durability = encoding.DurabilityNone - - // Only the existence and configuration of the terminus is - // retained durably. - DurabilityConfiguration Durability = encoding.DurabilityConfiguration - - // In addition to the existence and configuration of the - // terminus, the unsettled state for durable messages is - // retained durably. - DurabilityUnsettledState Durability = encoding.DurabilityUnsettledState -) - -// Durability specifies the durability of a link. -type Durability = encoding.Durability - -// Expiry Policies -const ( - // The expiry timer starts when terminus is detached. - ExpiryPolicyLinkDetach ExpiryPolicy = encoding.ExpiryLinkDetach - - // The expiry timer starts when the most recently - // associated session is ended. - ExpiryPolicySessionEnd ExpiryPolicy = encoding.ExpirySessionEnd - - // The expiry timer starts when most recently associated - // connection is closed. - ExpiryPolicyConnectionClose ExpiryPolicy = encoding.ExpiryConnectionClose - - // The terminus never expires. - ExpiryPolicyNever ExpiryPolicy = encoding.ExpiryNever -) - -// ExpiryPolicy specifies when the expiry timer of a terminus -// starts counting down from the timeout value. -// -// If the link is subsequently re-attached before the terminus is expired, -// then the count down is aborted. 
If the conditions for the -// terminus-expiry-policy are subsequently re-met, the expiry timer restarts -// from its originally configured timeout value. -type ExpiryPolicy = encoding.ExpiryPolicy diff --git a/sdk/messaging/azservicebus/internal/go-amqp/creditor.go b/sdk/messaging/azservicebus/internal/go-amqp/creditor.go deleted file mode 100644 index 184702bca7d2..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/creditor.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "errors" - "sync" -) - -type creditor struct { - mu sync.Mutex - - // future values for the next flow frame. - pendingDrain bool - creditsToAdd uint32 - - // drained is set when a drain is active and we're waiting - // for the corresponding flow from the remote. - drained chan struct{} -} - -var ( - errLinkDraining = errors.New("link is currently draining, no credits can be added") - errAlreadyDraining = errors.New("drain already in process") -) - -// EndDrain ends the current drain, unblocking any active Drain calls. -func (mc *creditor) EndDrain() { - mc.mu.Lock() - defer mc.mu.Unlock() - - if mc.drained != nil { - close(mc.drained) - mc.drained = nil - } -} - -// FlowBits gets gets the proper values for the next flow frame -// and resets the internal state. -// Returns: -// -// (drain: true, credits: 0) if a flow is needed (drain) -// (drain: false, credits > 0) if a flow is needed (issue credit) -// (drain: false, credits == 0) if no flow needed. 
-func (mc *creditor) FlowBits(currentCredits uint32) (bool, uint32) { - mc.mu.Lock() - defer mc.mu.Unlock() - - drain := mc.pendingDrain - var credits uint32 - - if mc.pendingDrain { - // only send one drain request - mc.pendingDrain = false - } - - // either: - // drain is true (ie, we're going to send a drain frame, and the credits for it should be 0) - // mc.creditsToAdd == 0 (no flow frame needed, no new credits are being issued) - if drain || mc.creditsToAdd == 0 { - credits = 0 - } else { - credits = mc.creditsToAdd + currentCredits - } - - mc.creditsToAdd = 0 - - return drain, credits -} - -// Drain initiates a drain and blocks until EndDrain is called. -// If the context's deadline expires or is cancelled before the operation -// completes, the drain might not have happened. -func (mc *creditor) Drain(ctx context.Context, r *Receiver) error { - mc.mu.Lock() - - if mc.drained != nil { - mc.mu.Unlock() - return errAlreadyDraining - } - - mc.drained = make(chan struct{}) - // use a local copy to avoid racing with EndDrain() - drained := mc.drained - mc.pendingDrain = true - - mc.mu.Unlock() - - // cause mux() to check our flow conditions. 
- select { - case r.receiverReady <- struct{}{}: - default: - } - - // send drain, wait for responding flow frame - select { - case <-drained: - return nil - case <-r.l.done: - return r.l.doneErr - case <-ctx.Done(): - return ctx.Err() - } -} - -// IssueCredit queues up additional credits to be requested at the next -// call of FlowBits() -func (mc *creditor) IssueCredit(credits uint32) error { - mc.mu.Lock() - defer mc.mu.Unlock() - - if mc.drained != nil { - return errLinkDraining - } - - mc.creditsToAdd += credits - return nil -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/errors.go b/sdk/messaging/azservicebus/internal/go-amqp/errors.go deleted file mode 100644 index 126fc330ab05..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/errors.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" -) - -// ErrCond is an AMQP defined error condition. -// See http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-transport-v1.0-os.html#type-amqp-error for info on their meaning. 
-type ErrCond = encoding.ErrCond - -// Error Conditions -const ( - // AMQP Errors - ErrCondDecodeError ErrCond = "amqp:decode-error" - ErrCondFrameSizeTooSmall ErrCond = "amqp:frame-size-too-small" - ErrCondIllegalState ErrCond = "amqp:illegal-state" - ErrCondInternalError ErrCond = "amqp:internal-error" - ErrCondInvalidField ErrCond = "amqp:invalid-field" - ErrCondNotAllowed ErrCond = "amqp:not-allowed" - ErrCondNotFound ErrCond = "amqp:not-found" - ErrCondNotImplemented ErrCond = "amqp:not-implemented" - ErrCondPreconditionFailed ErrCond = "amqp:precondition-failed" - ErrCondResourceDeleted ErrCond = "amqp:resource-deleted" - ErrCondResourceLimitExceeded ErrCond = "amqp:resource-limit-exceeded" - ErrCondResourceLocked ErrCond = "amqp:resource-locked" - ErrCondUnauthorizedAccess ErrCond = "amqp:unauthorized-access" - - // Connection Errors - ErrCondConnectionForced ErrCond = "amqp:connection:forced" - ErrCondConnectionRedirect ErrCond = "amqp:connection:redirect" - ErrCondFramingError ErrCond = "amqp:connection:framing-error" - - // Session Errors - ErrCondErrantLink ErrCond = "amqp:session:errant-link" - ErrCondHandleInUse ErrCond = "amqp:session:handle-in-use" - ErrCondUnattachedHandle ErrCond = "amqp:session:unattached-handle" - ErrCondWindowViolation ErrCond = "amqp:session:window-violation" - - // Link Errors - ErrCondDetachForced ErrCond = "amqp:link:detach-forced" - ErrCondLinkRedirect ErrCond = "amqp:link:redirect" - ErrCondMessageSizeExceeded ErrCond = "amqp:link:message-size-exceeded" - ErrCondStolen ErrCond = "amqp:link:stolen" - ErrCondTransferLimitExceeded ErrCond = "amqp:link:transfer-limit-exceeded" -) - -// Error is an AMQP error. -type Error = encoding.Error - -// LinkError is returned by methods on Sender/Receiver when the link has closed. -type LinkError struct { - // RemoteErr contains any error information provided by the peer if the peer detached the link. 
- RemoteErr *Error - - inner error -} - -// Error implements the error interface for LinkError. -func (e *LinkError) Error() string { - if e.RemoteErr == nil && e.inner == nil { - return "amqp: link closed" - } else if e.RemoteErr != nil { - return e.RemoteErr.Error() - } - return e.inner.Error() -} - -// ConnError is returned by methods on Conn and propagated to Session and Senders/Receivers -// when the connection has been closed. -type ConnError struct { - // RemoteErr contains any error information provided by the peer if the peer closed the AMQP connection. - RemoteErr *Error - - inner error -} - -// Error implements the error interface for ConnectionError. -func (e *ConnError) Error() string { - if e.RemoteErr == nil && e.inner == nil { - return "amqp: connection closed" - } else if e.RemoteErr != nil { - return e.RemoteErr.Error() - } - return e.inner.Error() -} - -// SessionError is returned by methods on Session and propagated to Senders/Receivers -// when the session has been closed. -type SessionError struct { - // RemoteErr contains any error information provided by the peer if the peer closed the session. - RemoteErr *Error - - inner error -} - -// Error implements the error interface for SessionError. 
-func (e *SessionError) Error() string { - if e.RemoteErr == nil && e.inner == nil { - return "amqp: session closed" - } else if e.RemoteErr != nil { - return e.RemoteErr.Error() - } - return e.inner.Error() -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/bitmap/bitmap.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/bitmap/bitmap.go deleted file mode 100644 index d4d682e9199e..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/bitmap/bitmap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package bitmap - -import ( - "math/bits" -) - -// bitmap is a lazily initialized bitmap -type Bitmap struct { - max uint32 - bits []uint64 -} - -func New(max uint32) *Bitmap { - return &Bitmap{max: max} -} - -// add sets n in the bitmap. -// -// bits will be expanded as needed. -// -// If n is greater than max, the call has no effect. -func (b *Bitmap) Add(n uint32) { - if n > b.max { - return - } - - var ( - idx = n / 64 - offset = n % 64 - ) - - if l := len(b.bits); int(idx) >= l { - b.bits = append(b.bits, make([]uint64, int(idx)-l+1)...) - } - - b.bits[idx] |= 1 << offset -} - -// remove clears n from the bitmap. -// -// If n is not set or greater than max the call has not effect. -func (b *Bitmap) Remove(n uint32) { - var ( - idx = n / 64 - offset = n % 64 - ) - - if int(idx) >= len(b.bits) { - return - } - - b.bits[idx] &= ^uint64(1 << offset) -} - -// next sets and returns the lowest unset bit in the bitmap. -// -// bits will be expanded if necessary. -// -// If there are no unset bits below max, the second return -// value will be false. 
-func (b *Bitmap) Next() (uint32, bool) { - // find the first unset bit - for i, v := range b.bits { - // skip if all bits are set - if v == ^uint64(0) { - continue - } - - var ( - offset = bits.TrailingZeros64(^v) // invert and count zeroes - next = uint32(i*64 + offset) - ) - - // check if in bounds - if next > b.max { - return next, false - } - - // set bit - b.bits[i] |= 1 << uint32(offset) - return next, true - } - - // no unset bits in the current slice, - // check if the full range has been allocated - if uint64(len(b.bits)*64) > uint64(b.max) { - return 0, false - } - - // full range not allocated, append entry with first - // bit set - b.bits = append(b.bits, 1) - - // return the value of the first bit - return uint32(len(b.bits)-1) * 64, true -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer/buffer.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer/buffer.go deleted file mode 100644 index b82e5fab76a6..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer/buffer.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package buffer - -import ( - "encoding/binary" - "io" -) - -// buffer is similar to bytes.Buffer but specialized for this package -type Buffer struct { - b []byte - i int -} - -func New(b []byte) *Buffer { - return &Buffer{b: b} -} - -func (b *Buffer) Next(n int64) ([]byte, bool) { - if b.readCheck(n) { - buf := b.b[b.i:len(b.b)] - b.i = len(b.b) - return buf, false - } - - buf := b.b[b.i : b.i+int(n)] - b.i += int(n) - return buf, true -} - -func (b *Buffer) Skip(n int) { - b.i += n -} - -func (b *Buffer) Reset() { - b.b = b.b[:0] - b.i = 0 -} - -// reclaim shifts used buffer space to the beginning of the -// underlying slice. 
-func (b *Buffer) Reclaim() { - l := b.Len() - copy(b.b[:l], b.b[b.i:]) - b.b = b.b[:l] - b.i = 0 -} - -func (b *Buffer) readCheck(n int64) bool { - return int64(b.i)+n > int64(len(b.b)) -} - -func (b *Buffer) ReadByte() (byte, error) { - if b.readCheck(1) { - return 0, io.EOF - } - - byte_ := b.b[b.i] - b.i++ - return byte_, nil -} - -func (b *Buffer) PeekByte() (byte, error) { - if b.readCheck(1) { - return 0, io.EOF - } - - return b.b[b.i], nil -} - -func (b *Buffer) ReadUint16() (uint16, error) { - if b.readCheck(2) { - return 0, io.EOF - } - - n := binary.BigEndian.Uint16(b.b[b.i:]) - b.i += 2 - return n, nil -} - -func (b *Buffer) ReadUint32() (uint32, error) { - if b.readCheck(4) { - return 0, io.EOF - } - - n := binary.BigEndian.Uint32(b.b[b.i:]) - b.i += 4 - return n, nil -} - -func (b *Buffer) ReadUint64() (uint64, error) { - if b.readCheck(8) { - return 0, io.EOF - } - - n := binary.BigEndian.Uint64(b.b[b.i : b.i+8]) - b.i += 8 - return n, nil -} - -func (b *Buffer) ReadFromOnce(r io.Reader) error { - const minRead = 512 - - l := len(b.b) - if cap(b.b)-l < minRead { - total := l * 2 - if total == 0 { - total = minRead - } - new := make([]byte, l, total) - copy(new, b.b) - b.b = new - } - - n, err := r.Read(b.b[l:cap(b.b)]) - b.b = b.b[:l+n] - return err -} - -func (b *Buffer) Append(p []byte) { - b.b = append(b.b, p...) -} - -func (b *Buffer) AppendByte(bb byte) { - b.b = append(b.b, bb) -} - -func (b *Buffer) AppendString(s string) { - b.b = append(b.b, s...) 
-} - -func (b *Buffer) Len() int { - return len(b.b) - b.i -} - -func (b *Buffer) Size() int { - return b.i -} - -func (b *Buffer) Bytes() []byte { - return b.b[b.i:] -} - -func (b *Buffer) Detach() []byte { - temp := b.b - b.b = nil - b.i = 0 - return temp -} - -func (b *Buffer) AppendUint16(n uint16) { - b.b = append(b.b, - byte(n>>8), - byte(n), - ) -} - -func (b *Buffer) AppendUint32(n uint32) { - b.b = append(b.b, - byte(n>>24), - byte(n>>16), - byte(n>>8), - byte(n), - ) -} - -func (b *Buffer) AppendUint64(n uint64) { - b.b = append(b.b, - byte(n>>56), - byte(n>>48), - byte(n>>40), - byte(n>>32), - byte(n>>24), - byte(n>>16), - byte(n>>8), - byte(n), - ) -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/debug/debug.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/debug/debug.go deleted file mode 100644 index 3e6821e1f723..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/debug/debug.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -//go:build !debug -// +build !debug - -package debug - -// dummy functions used when debugging is not enabled - -// Log writes the formatted string to stderr. -// Level indicates the verbosity of the messages to log. -// The greater the value, the more verbose messages will be logged. -func Log(_ int, _ string, _ ...any) {} - -// Assert panics if the specified condition is false. -func Assert(bool) {} - -// Assert panics with the provided message if the specified condition is false. 
-func Assertf(bool, string, ...any) {} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/debug/debug_debug.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/debug/debug_debug.go deleted file mode 100644 index 96d53768a5c9..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/debug/debug_debug.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -//go:build debug -// +build debug - -package debug - -import ( - "fmt" - "log" - "os" - "strconv" -) - -var ( - debugLevel = 1 - logger = log.New(os.Stderr, "", log.Lmicroseconds) -) - -func init() { - level, err := strconv.Atoi(os.Getenv("DEBUG_LEVEL")) - if err != nil { - return - } - - debugLevel = level -} - -// Log writes the formatted string to stderr. -// Level indicates the verbosity of the messages to log. -// The greater the value, the more verbose messages will be logged. -func Log(level int, format string, v ...any) { - if level <= debugLevel { - logger.Printf(format, v...) - } -} - -// Assert panics if the specified condition is false. -func Assert(condition bool) { - if !condition { - panic("assertion failed!") - } -} - -// Assert panics with the provided message if the specified condition is false. 
-func Assertf(condition bool, msg string, v ...any) { - if !condition { - panic(fmt.Sprintf(msg, v...)) - } -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/decode.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/decode.go deleted file mode 100644 index ad360628547a..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/decode.go +++ /dev/null @@ -1,1150 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package encoding - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "reflect" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer" -) - -// unmarshaler is fulfilled by types that can unmarshal -// themselves from AMQP data. -type unmarshaler interface { - Unmarshal(r *buffer.Buffer) error -} - -// unmarshal decodes AMQP encoded data into i. -// -// The decoding method is based on the type of i. -// -// If i implements unmarshaler, i.Unmarshal() will be called. -// -// Pointers to primitive types will be decoded via the appropriate read[Type] function. -// -// If i is a pointer to a pointer (**Type), it will be dereferenced and a new instance -// of (*Type) is allocated via reflection. -// -// Common map types (map[string]string, map[Symbol]any, and -// map[any]any), will be decoded via conversion to the mapStringAny, -// mapSymbolAny, and mapAnyAny types. 
-func Unmarshal(r *buffer.Buffer, i any) error { - if tryReadNull(r) { - return nil - } - - switch t := i.(type) { - case *int: - val, err := readInt(r) - if err != nil { - return err - } - *t = val - case *int8: - val, err := readSbyte(r) - if err != nil { - return err - } - *t = val - case *int16: - val, err := readShort(r) - if err != nil { - return err - } - *t = val - case *int32: - val, err := readInt32(r) - if err != nil { - return err - } - *t = val - case *int64: - val, err := readLong(r) - if err != nil { - return err - } - *t = val - case *uint64: - val, err := readUlong(r) - if err != nil { - return err - } - *t = val - case *uint32: - val, err := readUint32(r) - if err != nil { - return err - } - *t = val - case **uint32: // fastpath for uint32 pointer fields - val, err := readUint32(r) - if err != nil { - return err - } - *t = &val - case *uint16: - val, err := readUshort(r) - if err != nil { - return err - } - *t = val - case *uint8: - val, err := ReadUbyte(r) - if err != nil { - return err - } - *t = val - case *float32: - val, err := readFloat(r) - if err != nil { - return err - } - *t = val - case *float64: - val, err := readDouble(r) - if err != nil { - return err - } - *t = val - case *string: - val, err := ReadString(r) - if err != nil { - return err - } - *t = val - case *Symbol: - s, err := ReadString(r) - if err != nil { - return err - } - *t = Symbol(s) - case *[]byte: - val, err := readBinary(r) - if err != nil { - return err - } - *t = val - case *bool: - b, err := readBool(r) - if err != nil { - return err - } - *t = b - case *time.Time: - ts, err := readTimestamp(r) - if err != nil { - return err - } - *t = ts - case *[]int8: - return (*arrayInt8)(t).Unmarshal(r) - case *[]uint16: - return (*arrayUint16)(t).Unmarshal(r) - case *[]int16: - return (*arrayInt16)(t).Unmarshal(r) - case *[]uint32: - return (*arrayUint32)(t).Unmarshal(r) - case *[]int32: - return (*arrayInt32)(t).Unmarshal(r) - case *[]uint64: - return 
(*arrayUint64)(t).Unmarshal(r) - case *[]int64: - return (*arrayInt64)(t).Unmarshal(r) - case *[]float32: - return (*arrayFloat)(t).Unmarshal(r) - case *[]float64: - return (*arrayDouble)(t).Unmarshal(r) - case *[]bool: - return (*arrayBool)(t).Unmarshal(r) - case *[]string: - return (*arrayString)(t).Unmarshal(r) - case *[]Symbol: - return (*arraySymbol)(t).Unmarshal(r) - case *[][]byte: - return (*arrayBinary)(t).Unmarshal(r) - case *[]time.Time: - return (*arrayTimestamp)(t).Unmarshal(r) - case *[]UUID: - return (*arrayUUID)(t).Unmarshal(r) - case *[]any: - return (*list)(t).Unmarshal(r) - case *map[any]any: - return (*mapAnyAny)(t).Unmarshal(r) - case *map[string]any: - return (*mapStringAny)(t).Unmarshal(r) - case *map[Symbol]any: - return (*mapSymbolAny)(t).Unmarshal(r) - case *DeliveryState: - type_, _, err := PeekMessageType(r.Bytes()) - if err != nil { - return err - } - - switch AMQPType(type_) { - case TypeCodeStateAccepted: - *t = new(StateAccepted) - case TypeCodeStateModified: - *t = new(StateModified) - case TypeCodeStateReceived: - *t = new(StateReceived) - case TypeCodeStateRejected: - *t = new(StateRejected) - case TypeCodeStateReleased: - *t = new(StateReleased) - default: - return fmt.Errorf("unexpected type %d for deliveryState", type_) - } - return Unmarshal(r, *t) - - case *any: - v, err := ReadAny(r) - if err != nil { - return err - } - *t = v - - case unmarshaler: - return t.Unmarshal(r) - default: - // handle **T - v := reflect.Indirect(reflect.ValueOf(i)) - - // can't unmarshal into a non-pointer - if v.Kind() != reflect.Ptr { - return fmt.Errorf("unable to unmarshal %T", i) - } - - // if nil pointer, allocate a new value to - // unmarshal into - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - return Unmarshal(r, v.Interface()) - } - return nil -} - -// unmarshalComposite is a helper for use in a composite's unmarshal() function. -// -// The composite from r will be unmarshaled into zero or more fields. 
An error -// will be returned if typ does not match the decoded type. -func UnmarshalComposite(r *buffer.Buffer, type_ AMQPType, fields ...UnmarshalField) error { - cType, numFields, err := readCompositeHeader(r) - if err != nil { - return err - } - - // check type matches expectation - if cType != type_ { - return fmt.Errorf("invalid header %#0x for %#0x", cType, type_) - } - - // Validate the field count is less than or equal to the number of fields - // provided. Fields may be omitted by the sender if they are not set. - if numFields > int64(len(fields)) { - return fmt.Errorf("invalid field count %d for %#0x", numFields, type_) - } - - for i, field := range fields[:numFields] { - // If the field is null and handleNull is set, call it. - if tryReadNull(r) { - if field.HandleNull != nil { - err = field.HandleNull() - if err != nil { - return err - } - } - continue - } - - // Unmarshal each of the received fields. - err = Unmarshal(r, field.Field) - if err != nil { - return fmt.Errorf("unmarshaling field %d: %v", i, err) - } - } - - // check and call handleNull for the remaining fields - for _, field := range fields[numFields:] { - if field.HandleNull != nil { - err = field.HandleNull() - if err != nil { - return err - } - } - } - - return nil -} - -// unmarshalField is a struct that contains a field to be unmarshaled into. -// -// An optional nullHandler can be set. If the composite field being unmarshaled -// is null and handleNull is not nil, nullHandler will be called. -type UnmarshalField struct { - Field any - HandleNull NullHandler -} - -// nullHandler is a function to be called when a composite's field -// is null. -type NullHandler func() error - -func readType(r *buffer.Buffer) (AMQPType, error) { - n, err := r.ReadByte() - return AMQPType(n), err -} - -func peekType(r *buffer.Buffer) (AMQPType, error) { - n, err := r.PeekByte() - return AMQPType(n), err -} - -// readCompositeHeader reads and consumes the composite header from r. 
-func readCompositeHeader(r *buffer.Buffer) (_ AMQPType, fields int64, _ error) { - type_, err := readType(r) - if err != nil { - return 0, 0, err - } - - // compsites always start with 0x0 - if type_ != 0 { - return 0, 0, fmt.Errorf("invalid composite header %#02x", type_) - } - - // next, the composite type is encoded as an AMQP uint8 - v, err := readUlong(r) - if err != nil { - return 0, 0, err - } - - // fields are represented as a list - fields, err = readListHeader(r) - - return AMQPType(v), fields, err -} - -func readListHeader(r *buffer.Buffer) (length int64, _ error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - listLength := r.Len() - - switch type_ { - case TypeCodeList0: - return 0, nil - case TypeCodeList8: - buf, ok := r.Next(2) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[1] - - size := int(buf[0]) - if size > listLength-1 { - return 0, errors.New("invalid length") - } - length = int64(buf[1]) - case TypeCodeList32: - buf, ok := r.Next(8) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[7] - - size := int(binary.BigEndian.Uint32(buf[:4])) - if size > listLength-4 { - return 0, errors.New("invalid length") - } - length = int64(binary.BigEndian.Uint32(buf[4:8])) - default: - return 0, fmt.Errorf("type code %#02x is not a recognized list type", type_) - } - - return length, nil -} - -func readArrayHeader(r *buffer.Buffer) (length int64, _ error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - arrayLength := r.Len() - - switch type_ { - case TypeCodeArray8: - buf, ok := r.Next(2) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[1] - - size := int(buf[0]) - if size > arrayLength-1 { - return 0, errors.New("invalid length") - } - length = int64(buf[1]) - case TypeCodeArray32: - buf, ok := r.Next(8) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[7] - - size := binary.BigEndian.Uint32(buf[:4]) - if int(size) > arrayLength-4 { - return 
0, fmt.Errorf("invalid length for type %02x", type_) - } - length = int64(binary.BigEndian.Uint32(buf[4:8])) - default: - return 0, fmt.Errorf("type code %#02x is not a recognized array type", type_) - } - return length, nil -} - -func ReadString(r *buffer.Buffer) (string, error) { - type_, err := readType(r) - if err != nil { - return "", err - } - - var length int64 - switch type_ { - case TypeCodeStr8, TypeCodeSym8: - n, err := r.ReadByte() - if err != nil { - return "", err - } - length = int64(n) - case TypeCodeStr32, TypeCodeSym32: - buf, ok := r.Next(4) - if !ok { - return "", fmt.Errorf("invalid length for type %#02x", type_) - } - length = int64(binary.BigEndian.Uint32(buf)) - default: - return "", fmt.Errorf("type code %#02x is not a recognized string type", type_) - } - - buf, ok := r.Next(length) - if !ok { - return "", errors.New("invalid length") - } - return string(buf), nil -} - -func readBinary(r *buffer.Buffer) ([]byte, error) { - type_, err := readType(r) - if err != nil { - return nil, err - } - - var length int64 - switch type_ { - case TypeCodeVbin8: - n, err := r.ReadByte() - if err != nil { - return nil, err - } - length = int64(n) - case TypeCodeVbin32: - buf, ok := r.Next(4) - if !ok { - return nil, fmt.Errorf("invalid length for type %#02x", type_) - } - length = int64(binary.BigEndian.Uint32(buf)) - default: - return nil, fmt.Errorf("type code %#02x is not a recognized binary type", type_) - } - - if length == 0 { - // An empty value and a nil value are distinct, - // ensure that the returned value is not nil in this case. 
- return make([]byte, 0), nil - } - - buf, ok := r.Next(length) - if !ok { - return nil, errors.New("invalid length") - } - return append([]byte(nil), buf...), nil -} - -func ReadAny(r *buffer.Buffer) (any, error) { - if tryReadNull(r) { - return nil, nil - } - - type_, err := peekType(r) - if err != nil { - return nil, errors.New("invalid length") - } - - switch type_ { - // composite - case 0x0: - return readComposite(r) - - // bool - case TypeCodeBool, TypeCodeBoolTrue, TypeCodeBoolFalse: - return readBool(r) - - // uint - case TypeCodeUbyte: - return ReadUbyte(r) - case TypeCodeUshort: - return readUshort(r) - case TypeCodeUint, - TypeCodeSmallUint, - TypeCodeUint0: - return readUint32(r) - case TypeCodeUlong, - TypeCodeSmallUlong, - TypeCodeUlong0: - return readUlong(r) - - // int - case TypeCodeByte: - return readSbyte(r) - case TypeCodeShort: - return readShort(r) - case TypeCodeInt, - TypeCodeSmallint: - return readInt32(r) - case TypeCodeLong, - TypeCodeSmalllong: - return readLong(r) - - // floating point - case TypeCodeFloat: - return readFloat(r) - case TypeCodeDouble: - return readDouble(r) - - // binary - case TypeCodeVbin8, TypeCodeVbin32: - return readBinary(r) - - // strings - case TypeCodeStr8, TypeCodeStr32: - return ReadString(r) - case TypeCodeSym8, TypeCodeSym32: - // symbols currently decoded as string to avoid - // exposing symbol type in message, this may need - // to change if users need to distinguish strings - // from symbols - return ReadString(r) - - // timestamp - case TypeCodeTimestamp: - return readTimestamp(r) - - // UUID - case TypeCodeUUID: - return readUUID(r) - - // arrays - case TypeCodeArray8, TypeCodeArray32: - return readAnyArray(r) - - // lists - case TypeCodeList0, TypeCodeList8, TypeCodeList32: - return readAnyList(r) - - // maps - case TypeCodeMap8: - return readAnyMap(r) - case TypeCodeMap32: - return readAnyMap(r) - - // TODO: implement - case TypeCodeDecimal32: - return nil, errors.New("decimal32 not implemented") - 
case TypeCodeDecimal64: - return nil, errors.New("decimal64 not implemented") - case TypeCodeDecimal128: - return nil, errors.New("decimal128 not implemented") - case TypeCodeChar: - return nil, errors.New("char not implemented") - default: - return nil, fmt.Errorf("unknown type %#02x", type_) - } -} - -func readAnyMap(r *buffer.Buffer) (any, error) { - var m map[any]any - err := (*mapAnyAny)(&m).Unmarshal(r) - if err != nil { - return nil, err - } - - if len(m) == 0 { - return m, nil - } - - stringKeys := true -Loop: - for key := range m { - switch key.(type) { - case string: - case Symbol: - default: - stringKeys = false - break Loop - } - } - - if stringKeys { - mm := make(map[string]any, len(m)) - for key, value := range m { - switch key := key.(type) { - case string: - mm[key] = value - case Symbol: - mm[string(key)] = value - } - } - return mm, nil - } - - return m, nil -} - -func readAnyList(r *buffer.Buffer) (any, error) { - var a []any - err := (*list)(&a).Unmarshal(r) - return a, err -} - -func readAnyArray(r *buffer.Buffer) (any, error) { - // get the array type - buf := r.Bytes() - if len(buf) < 1 { - return nil, errors.New("invalid length") - } - - var typeIdx int - switch AMQPType(buf[0]) { - case TypeCodeArray8: - typeIdx = 3 - case TypeCodeArray32: - typeIdx = 9 - default: - return nil, fmt.Errorf("invalid array type %02x", buf[0]) - } - if len(buf) < typeIdx+1 { - return nil, errors.New("invalid length") - } - - switch AMQPType(buf[typeIdx]) { - case TypeCodeByte: - var a []int8 - err := (*arrayInt8)(&a).Unmarshal(r) - return a, err - case TypeCodeUbyte: - var a ArrayUByte - err := a.Unmarshal(r) - return a, err - case TypeCodeUshort: - var a []uint16 - err := (*arrayUint16)(&a).Unmarshal(r) - return a, err - case TypeCodeShort: - var a []int16 - err := (*arrayInt16)(&a).Unmarshal(r) - return a, err - case TypeCodeUint0, TypeCodeSmallUint, TypeCodeUint: - var a []uint32 - err := (*arrayUint32)(&a).Unmarshal(r) - return a, err - case 
TypeCodeSmallint, TypeCodeInt: - var a []int32 - err := (*arrayInt32)(&a).Unmarshal(r) - return a, err - case TypeCodeUlong0, TypeCodeSmallUlong, TypeCodeUlong: - var a []uint64 - err := (*arrayUint64)(&a).Unmarshal(r) - return a, err - case TypeCodeSmalllong, TypeCodeLong: - var a []int64 - err := (*arrayInt64)(&a).Unmarshal(r) - return a, err - case TypeCodeFloat: - var a []float32 - err := (*arrayFloat)(&a).Unmarshal(r) - return a, err - case TypeCodeDouble: - var a []float64 - err := (*arrayDouble)(&a).Unmarshal(r) - return a, err - case TypeCodeBool, TypeCodeBoolTrue, TypeCodeBoolFalse: - var a []bool - err := (*arrayBool)(&a).Unmarshal(r) - return a, err - case TypeCodeStr8, TypeCodeStr32: - var a []string - err := (*arrayString)(&a).Unmarshal(r) - return a, err - case TypeCodeSym8, TypeCodeSym32: - var a []Symbol - err := (*arraySymbol)(&a).Unmarshal(r) - return a, err - case TypeCodeVbin8, TypeCodeVbin32: - var a [][]byte - err := (*arrayBinary)(&a).Unmarshal(r) - return a, err - case TypeCodeTimestamp: - var a []time.Time - err := (*arrayTimestamp)(&a).Unmarshal(r) - return a, err - case TypeCodeUUID: - var a []UUID - err := (*arrayUUID)(&a).Unmarshal(r) - return a, err - default: - return nil, fmt.Errorf("array decoding not implemented for %#02x", buf[typeIdx]) - } -} - -func readComposite(r *buffer.Buffer) (any, error) { - buf := r.Bytes() - - if len(buf) < 2 { - return nil, errors.New("invalid length for composite") - } - - // compsites start with 0x0 - if AMQPType(buf[0]) != 0x0 { - return nil, fmt.Errorf("invalid composite header %#02x", buf[0]) - } - - var compositeType uint64 - switch AMQPType(buf[1]) { - case TypeCodeSmallUlong: - if len(buf) < 3 { - return nil, errors.New("invalid length for smallulong") - } - compositeType = uint64(buf[2]) - case TypeCodeUlong: - if len(buf) < 10 { - return nil, errors.New("invalid length for ulong") - } - compositeType = binary.BigEndian.Uint64(buf[2:]) - } - - if compositeType > math.MaxUint8 { - // try as 
described type - var dt DescribedType - err := dt.Unmarshal(r) - return dt, err - } - - switch AMQPType(compositeType) { - // Error - case TypeCodeError: - t := new(Error) - err := t.Unmarshal(r) - return t, err - - // Lifetime Policies - case TypeCodeDeleteOnClose: - t := DeleteOnClose - err := t.Unmarshal(r) - return t, err - case TypeCodeDeleteOnNoMessages: - t := DeleteOnNoMessages - err := t.Unmarshal(r) - return t, err - case TypeCodeDeleteOnNoLinks: - t := DeleteOnNoLinks - err := t.Unmarshal(r) - return t, err - case TypeCodeDeleteOnNoLinksOrMessages: - t := DeleteOnNoLinksOrMessages - err := t.Unmarshal(r) - return t, err - - // Delivery States - case TypeCodeStateAccepted: - t := new(StateAccepted) - err := t.Unmarshal(r) - return t, err - case TypeCodeStateModified: - t := new(StateModified) - err := t.Unmarshal(r) - return t, err - case TypeCodeStateReceived: - t := new(StateReceived) - err := t.Unmarshal(r) - return t, err - case TypeCodeStateRejected: - t := new(StateRejected) - err := t.Unmarshal(r) - return t, err - case TypeCodeStateReleased: - t := new(StateReleased) - err := t.Unmarshal(r) - return t, err - - case TypeCodeOpen, - TypeCodeBegin, - TypeCodeAttach, - TypeCodeFlow, - TypeCodeTransfer, - TypeCodeDisposition, - TypeCodeDetach, - TypeCodeEnd, - TypeCodeClose, - TypeCodeSource, - TypeCodeTarget, - TypeCodeMessageHeader, - TypeCodeDeliveryAnnotations, - TypeCodeMessageAnnotations, - TypeCodeMessageProperties, - TypeCodeApplicationProperties, - TypeCodeApplicationData, - TypeCodeAMQPSequence, - TypeCodeAMQPValue, - TypeCodeFooter, - TypeCodeSASLMechanism, - TypeCodeSASLInit, - TypeCodeSASLChallenge, - TypeCodeSASLResponse, - TypeCodeSASLOutcome: - return nil, fmt.Errorf("readComposite unmarshal not implemented for %#02x", compositeType) - - default: - // try as described type - var dt DescribedType - err := dt.Unmarshal(r) - return dt, err - } -} - -func readTimestamp(r *buffer.Buffer) (time.Time, error) { - type_, err := readType(r) - if 
err != nil { - return time.Time{}, err - } - - if type_ != TypeCodeTimestamp { - return time.Time{}, fmt.Errorf("invalid type for timestamp %02x", type_) - } - - n, err := r.ReadUint64() - ms := int64(n) - return time.Unix(ms/1000, (ms%1000)*1000000).UTC(), err -} - -func readInt(r *buffer.Buffer) (int, error) { - type_, err := peekType(r) - if err != nil { - return 0, err - } - - switch type_ { - // Unsigned - case TypeCodeUbyte: - n, err := ReadUbyte(r) - return int(n), err - case TypeCodeUshort: - n, err := readUshort(r) - return int(n), err - case TypeCodeUint0, TypeCodeSmallUint, TypeCodeUint: - n, err := readUint32(r) - return int(n), err - case TypeCodeUlong0, TypeCodeSmallUlong, TypeCodeUlong: - n, err := readUlong(r) - return int(n), err - - // Signed - case TypeCodeByte: - n, err := readSbyte(r) - return int(n), err - case TypeCodeShort: - n, err := readShort(r) - return int(n), err - case TypeCodeSmallint, TypeCodeInt: - n, err := readInt32(r) - return int(n), err - case TypeCodeSmalllong, TypeCodeLong: - n, err := readLong(r) - return int(n), err - default: - return 0, fmt.Errorf("type code %#02x is not a recognized number type", type_) - } -} - -func readLong(r *buffer.Buffer) (int64, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeSmalllong: - n, err := r.ReadByte() - return int64(int8(n)), err - case TypeCodeLong: - n, err := r.ReadUint64() - return int64(n), err - default: - return 0, fmt.Errorf("invalid type for uint32 %02x", type_) - } -} - -func readInt32(r *buffer.Buffer) (int32, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeSmallint: - n, err := r.ReadByte() - return int32(int8(n)), err - case TypeCodeInt: - n, err := r.ReadUint32() - return int32(n), err - default: - return 0, fmt.Errorf("invalid type for int32 %02x", type_) - } -} - -func readShort(r *buffer.Buffer) (int16, error) { - type_, err := readType(r) - if err 
!= nil { - return 0, err - } - - if type_ != TypeCodeShort { - return 0, fmt.Errorf("invalid type for short %02x", type_) - } - - n, err := r.ReadUint16() - return int16(n), err -} - -func readSbyte(r *buffer.Buffer) (int8, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeByte { - return 0, fmt.Errorf("invalid type for int8 %02x", type_) - } - - n, err := r.ReadByte() - return int8(n), err -} - -func ReadUbyte(r *buffer.Buffer) (uint8, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeUbyte { - return 0, fmt.Errorf("invalid type for ubyte %02x", type_) - } - - return r.ReadByte() -} - -func readUshort(r *buffer.Buffer) (uint16, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeUshort { - return 0, fmt.Errorf("invalid type for ushort %02x", type_) - } - - return r.ReadUint16() -} - -func readUint32(r *buffer.Buffer) (uint32, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeUint0: - return 0, nil - case TypeCodeSmallUint: - n, err := r.ReadByte() - return uint32(n), err - case TypeCodeUint: - return r.ReadUint32() - default: - return 0, fmt.Errorf("invalid type for uint32 %02x", type_) - } -} - -func readUlong(r *buffer.Buffer) (uint64, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeUlong0: - return 0, nil - case TypeCodeSmallUlong: - n, err := r.ReadByte() - return uint64(n), err - case TypeCodeUlong: - return r.ReadUint64() - default: - return 0, fmt.Errorf("invalid type for uint32 %02x", type_) - } -} - -func readFloat(r *buffer.Buffer) (float32, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeFloat { - return 0, fmt.Errorf("invalid type for float32 %02x", type_) - } - - bits, err := r.ReadUint32() - return math.Float32frombits(bits), err -} - -func 
readDouble(r *buffer.Buffer) (float64, error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - if type_ != TypeCodeDouble { - return 0, fmt.Errorf("invalid type for float64 %02x", type_) - } - - bits, err := r.ReadUint64() - return math.Float64frombits(bits), err -} - -func readBool(r *buffer.Buffer) (bool, error) { - type_, err := readType(r) - if err != nil { - return false, err - } - - switch type_ { - case TypeCodeBool: - b, err := r.ReadByte() - return b != 0, err - case TypeCodeBoolTrue: - return true, nil - case TypeCodeBoolFalse: - return false, nil - default: - return false, fmt.Errorf("type code %#02x is not a recognized bool type", type_) - } -} - -func readUint(r *buffer.Buffer) (value uint64, _ error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - switch type_ { - case TypeCodeUint0, TypeCodeUlong0: - return 0, nil - case TypeCodeUbyte, TypeCodeSmallUint, TypeCodeSmallUlong: - n, err := r.ReadByte() - return uint64(n), err - case TypeCodeUshort: - n, err := r.ReadUint16() - return uint64(n), err - case TypeCodeUint: - n, err := r.ReadUint32() - return uint64(n), err - case TypeCodeUlong: - return r.ReadUint64() - default: - return 0, fmt.Errorf("type code %#02x is not a recognized number type", type_) - } -} - -func readUUID(r *buffer.Buffer) (UUID, error) { - var uuid UUID - - type_, err := readType(r) - if err != nil { - return uuid, err - } - - if type_ != TypeCodeUUID { - return uuid, fmt.Errorf("type code %#00x is not a UUID", type_) - } - - buf, ok := r.Next(16) - if !ok { - return uuid, errors.New("invalid length") - } - copy(uuid[:], buf) - - return uuid, nil -} - -func readMapHeader(r *buffer.Buffer) (count uint32, _ error) { - type_, err := readType(r) - if err != nil { - return 0, err - } - - length := r.Len() - - switch type_ { - case TypeCodeMap8: - buf, ok := r.Next(2) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[1] - - size := int(buf[0]) - if size > length-1 { - return 
0, errors.New("invalid length") - } - count = uint32(buf[1]) - case TypeCodeMap32: - buf, ok := r.Next(8) - if !ok { - return 0, errors.New("invalid length") - } - _ = buf[7] - - size := int(binary.BigEndian.Uint32(buf[:4])) - if size > length-4 { - return 0, errors.New("invalid length") - } - count = binary.BigEndian.Uint32(buf[4:8]) - default: - return 0, fmt.Errorf("invalid map type %#02x", type_) - } - - if int(count) > r.Len() { - return 0, errors.New("invalid length") - } - return count, nil -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/encode.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/encode.go deleted file mode 100644 index 767318c02edc..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/encode.go +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package encoding - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "time" - "unicode/utf8" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer" -) - -type marshaler interface { - Marshal(*buffer.Buffer) error -} - -func Marshal(wr *buffer.Buffer, i any) error { - switch t := i.(type) { - case nil: - wr.AppendByte(byte(TypeCodeNull)) - case bool: - if t { - wr.AppendByte(byte(TypeCodeBoolTrue)) - } else { - wr.AppendByte(byte(TypeCodeBoolFalse)) - } - case *bool: - if *t { - wr.AppendByte(byte(TypeCodeBoolTrue)) - } else { - wr.AppendByte(byte(TypeCodeBoolFalse)) - } - case uint: - writeUint64(wr, uint64(t)) - case *uint: - writeUint64(wr, uint64(*t)) - case uint64: - writeUint64(wr, t) - case *uint64: - writeUint64(wr, *t) - case uint32: - writeUint32(wr, t) - case *uint32: - writeUint32(wr, *t) - case uint16: - wr.AppendByte(byte(TypeCodeUshort)) - wr.AppendUint16(t) - case *uint16: - wr.AppendByte(byte(TypeCodeUshort)) - wr.AppendUint16(*t) - case uint8: - wr.Append([]byte{ - 
byte(TypeCodeUbyte), - t, - }) - case *uint8: - wr.Append([]byte{ - byte(TypeCodeUbyte), - *t, - }) - case int: - writeInt64(wr, int64(t)) - case *int: - writeInt64(wr, int64(*t)) - case int8: - wr.Append([]byte{ - byte(TypeCodeByte), - uint8(t), - }) - case *int8: - wr.Append([]byte{ - byte(TypeCodeByte), - uint8(*t), - }) - case int16: - wr.AppendByte(byte(TypeCodeShort)) - wr.AppendUint16(uint16(t)) - case *int16: - wr.AppendByte(byte(TypeCodeShort)) - wr.AppendUint16(uint16(*t)) - case int32: - writeInt32(wr, t) - case *int32: - writeInt32(wr, *t) - case int64: - writeInt64(wr, t) - case *int64: - writeInt64(wr, *t) - case float32: - writeFloat(wr, t) - case *float32: - writeFloat(wr, *t) - case float64: - writeDouble(wr, t) - case *float64: - writeDouble(wr, *t) - case string: - return writeString(wr, t) - case *string: - return writeString(wr, *t) - case []byte: - return WriteBinary(wr, t) - case *[]byte: - return WriteBinary(wr, *t) - case map[any]any: - return writeMap(wr, t) - case *map[any]any: - return writeMap(wr, *t) - case map[string]any: - return writeMap(wr, t) - case *map[string]any: - return writeMap(wr, *t) - case map[Symbol]any: - return writeMap(wr, t) - case *map[Symbol]any: - return writeMap(wr, *t) - case Unsettled: - return writeMap(wr, t) - case *Unsettled: - return writeMap(wr, *t) - case time.Time: - writeTimestamp(wr, t) - case *time.Time: - writeTimestamp(wr, *t) - case []int8: - return arrayInt8(t).Marshal(wr) - case *[]int8: - return arrayInt8(*t).Marshal(wr) - case []uint16: - return arrayUint16(t).Marshal(wr) - case *[]uint16: - return arrayUint16(*t).Marshal(wr) - case []int16: - return arrayInt16(t).Marshal(wr) - case *[]int16: - return arrayInt16(*t).Marshal(wr) - case []uint32: - return arrayUint32(t).Marshal(wr) - case *[]uint32: - return arrayUint32(*t).Marshal(wr) - case []int32: - return arrayInt32(t).Marshal(wr) - case *[]int32: - return arrayInt32(*t).Marshal(wr) - case []uint64: - return arrayUint64(t).Marshal(wr) - case 
*[]uint64: - return arrayUint64(*t).Marshal(wr) - case []int64: - return arrayInt64(t).Marshal(wr) - case *[]int64: - return arrayInt64(*t).Marshal(wr) - case []float32: - return arrayFloat(t).Marshal(wr) - case *[]float32: - return arrayFloat(*t).Marshal(wr) - case []float64: - return arrayDouble(t).Marshal(wr) - case *[]float64: - return arrayDouble(*t).Marshal(wr) - case []bool: - return arrayBool(t).Marshal(wr) - case *[]bool: - return arrayBool(*t).Marshal(wr) - case []string: - return arrayString(t).Marshal(wr) - case *[]string: - return arrayString(*t).Marshal(wr) - case []Symbol: - return arraySymbol(t).Marshal(wr) - case *[]Symbol: - return arraySymbol(*t).Marshal(wr) - case [][]byte: - return arrayBinary(t).Marshal(wr) - case *[][]byte: - return arrayBinary(*t).Marshal(wr) - case []time.Time: - return arrayTimestamp(t).Marshal(wr) - case *[]time.Time: - return arrayTimestamp(*t).Marshal(wr) - case []UUID: - return arrayUUID(t).Marshal(wr) - case *[]UUID: - return arrayUUID(*t).Marshal(wr) - case []any: - return list(t).Marshal(wr) - case *[]any: - return list(*t).Marshal(wr) - case marshaler: - return t.Marshal(wr) - default: - return fmt.Errorf("marshal not implemented for %T", i) - } - return nil -} - -func writeInt32(wr *buffer.Buffer, n int32) { - if n < 128 && n >= -128 { - wr.Append([]byte{ - byte(TypeCodeSmallint), - byte(n), - }) - return - } - - wr.AppendByte(byte(TypeCodeInt)) - wr.AppendUint32(uint32(n)) -} - -func writeInt64(wr *buffer.Buffer, n int64) { - if n < 128 && n >= -128 { - wr.Append([]byte{ - byte(TypeCodeSmalllong), - byte(n), - }) - return - } - - wr.AppendByte(byte(TypeCodeLong)) - wr.AppendUint64(uint64(n)) -} - -func writeUint32(wr *buffer.Buffer, n uint32) { - if n == 0 { - wr.AppendByte(byte(TypeCodeUint0)) - return - } - - if n < 256 { - wr.Append([]byte{ - byte(TypeCodeSmallUint), - byte(n), - }) - return - } - - wr.AppendByte(byte(TypeCodeUint)) - wr.AppendUint32(n) -} - -func writeUint64(wr *buffer.Buffer, n uint64) { - 
if n == 0 { - wr.AppendByte(byte(TypeCodeUlong0)) - return - } - - if n < 256 { - wr.Append([]byte{ - byte(TypeCodeSmallUlong), - byte(n), - }) - return - } - - wr.AppendByte(byte(TypeCodeUlong)) - wr.AppendUint64(n) -} - -func writeFloat(wr *buffer.Buffer, f float32) { - wr.AppendByte(byte(TypeCodeFloat)) - wr.AppendUint32(math.Float32bits(f)) -} - -func writeDouble(wr *buffer.Buffer, f float64) { - wr.AppendByte(byte(TypeCodeDouble)) - wr.AppendUint64(math.Float64bits(f)) -} - -func writeTimestamp(wr *buffer.Buffer, t time.Time) { - wr.AppendByte(byte(TypeCodeTimestamp)) - ms := t.UnixNano() / int64(time.Millisecond) - wr.AppendUint64(uint64(ms)) -} - -// marshalField is a field to be marshaled -type MarshalField struct { - Value any // value to be marshaled, use pointers to avoid interface conversion overhead - Omit bool // indicates that this field should be omitted (set to null) -} - -// marshalComposite is a helper for us in a composite's marshal() function. -// -// The returned bytes include the composite header and fields. Fields with -// omit set to true will be encoded as null or omitted altogether if there are -// no non-null fields after them. -func MarshalComposite(wr *buffer.Buffer, code AMQPType, fields []MarshalField) error { - // lastSetIdx is the last index to have a non-omitted field. - // start at -1 as it's possible to have no fields in a composite - lastSetIdx := -1 - - // marshal each field into it's index in rawFields, - // null fields are skipped, leaving the index nil. 
- for i, f := range fields { - if f.Omit { - continue - } - lastSetIdx = i - } - - // write header only - if lastSetIdx == -1 { - wr.Append([]byte{ - 0x0, - byte(TypeCodeSmallUlong), - byte(code), - byte(TypeCodeList0), - }) - return nil - } - - // write header - WriteDescriptor(wr, code) - - // write fields - wr.AppendByte(byte(TypeCodeList32)) - - // write temp size, replace later - sizeIdx := wr.Len() - wr.Append([]byte{0, 0, 0, 0}) - preFieldLen := wr.Len() - - // field count - wr.AppendUint32(uint32(lastSetIdx + 1)) - - // write null to each index up to lastSetIdx - for _, f := range fields[:lastSetIdx+1] { - if f.Omit { - wr.AppendByte(byte(TypeCodeNull)) - continue - } - err := Marshal(wr, f.Value) - if err != nil { - return err - } - } - - // fix size - size := uint32(wr.Len() - preFieldLen) - buf := wr.Bytes() - binary.BigEndian.PutUint32(buf[sizeIdx:], size) - - return nil -} - -func WriteDescriptor(wr *buffer.Buffer, code AMQPType) { - wr.Append([]byte{ - 0x0, - byte(TypeCodeSmallUlong), - byte(code), - }) -} - -func writeString(wr *buffer.Buffer, str string) error { - if !utf8.ValidString(str) { - return errors.New("not a valid UTF-8 string") - } - l := len(str) - - switch { - // Str8 - case l < 256: - wr.Append([]byte{ - byte(TypeCodeStr8), - byte(l), - }) - wr.AppendString(str) - return nil - - // Str32 - case uint(l) < math.MaxUint32: - wr.AppendByte(byte(TypeCodeStr32)) - wr.AppendUint32(uint32(l)) - wr.AppendString(str) - return nil - - default: - return errors.New("too long") - } -} - -func WriteBinary(wr *buffer.Buffer, bin []byte) error { - l := len(bin) - - switch { - // List8 - case l < 256: - wr.Append([]byte{ - byte(TypeCodeVbin8), - byte(l), - }) - wr.Append(bin) - return nil - - // List32 - case uint(l) < math.MaxUint32: - wr.AppendByte(byte(TypeCodeVbin32)) - wr.AppendUint32(uint32(l)) - wr.Append(bin) - return nil - - default: - return errors.New("too long") - } -} - -func writeMap(wr *buffer.Buffer, m any) error { - startIdx := wr.Len() 
- wr.Append([]byte{ - byte(TypeCodeMap32), // type - 0, 0, 0, 0, // size placeholder - 0, 0, 0, 0, // length placeholder - }) - - var pairs int - switch m := m.(type) { - case map[any]any: - pairs = len(m) * 2 - for key, val := range m { - err := Marshal(wr, key) - if err != nil { - return err - } - err = Marshal(wr, val) - if err != nil { - return err - } - } - case map[string]any: - pairs = len(m) * 2 - for key, val := range m { - err := writeString(wr, key) - if err != nil { - return err - } - err = Marshal(wr, val) - if err != nil { - return err - } - } - case map[Symbol]any: - pairs = len(m) * 2 - for key, val := range m { - err := key.Marshal(wr) - if err != nil { - return err - } - err = Marshal(wr, val) - if err != nil { - return err - } - } - case Unsettled: - pairs = len(m) * 2 - for key, val := range m { - err := writeString(wr, key) - if err != nil { - return err - } - err = Marshal(wr, val) - if err != nil { - return err - } - } - case Filter: - pairs = len(m) * 2 - for key, val := range m { - err := key.Marshal(wr) - if err != nil { - return err - } - err = val.Marshal(wr) - if err != nil { - return err - } - } - case Annotations: - pairs = len(m) * 2 - for key, val := range m { - switch key := key.(type) { - case string: - err := Symbol(key).Marshal(wr) - if err != nil { - return err - } - case Symbol: - err := key.Marshal(wr) - if err != nil { - return err - } - case int64: - writeInt64(wr, key) - case int: - writeInt64(wr, int64(key)) - default: - return fmt.Errorf("unsupported Annotations key type %T", key) - } - - err := Marshal(wr, val) - if err != nil { - return err - } - } - default: - return fmt.Errorf("unsupported map type %T", m) - } - - if uint(pairs) > math.MaxUint32-4 { - return errors.New("map contains too many elements") - } - - // overwrite placeholder size and length - bytes := wr.Bytes()[startIdx+1 : startIdx+9] - _ = bytes[7] // bounds check hint - - length := wr.Len() - startIdx - 1 - 4 // -1 for type, -4 for length - 
binary.BigEndian.PutUint32(bytes[:4], uint32(length)) - binary.BigEndian.PutUint32(bytes[4:8], uint32(pairs)) - - return nil -} - -// type length sizes -const ( - array8TLSize = 2 - array32TLSize = 5 -) - -func writeArrayHeader(wr *buffer.Buffer, length, typeSize int, type_ AMQPType) { - size := length * typeSize - - // array type - if size+array8TLSize <= math.MaxUint8 { - wr.Append([]byte{ - byte(TypeCodeArray8), // type - byte(size + array8TLSize), // size - byte(length), // length - byte(type_), // element type - }) - } else { - wr.AppendByte(byte(TypeCodeArray32)) //type - wr.AppendUint32(uint32(size + array32TLSize)) // size - wr.AppendUint32(uint32(length)) // length - wr.AppendByte(byte(type_)) // element type - } -} - -func writeVariableArrayHeader(wr *buffer.Buffer, length, elementsSizeTotal int, type_ AMQPType) { - // 0xA_ == 1, 0xB_ == 4 - // http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-types-v1.0-os.html#doc-idp82960 - elementTypeSize := 1 - if type_&0xf0 == 0xb0 { - elementTypeSize = 4 - } - - size := elementsSizeTotal + (length * elementTypeSize) // size excluding array length - if size+array8TLSize <= math.MaxUint8 { - wr.Append([]byte{ - byte(TypeCodeArray8), // type - byte(size + array8TLSize), // size - byte(length), // length - byte(type_), // element type - }) - } else { - wr.AppendByte(byte(TypeCodeArray32)) // type - wr.AppendUint32(uint32(size + array32TLSize)) // size - wr.AppendUint32(uint32(length)) // length - wr.AppendByte(byte(type_)) // element type - } -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/types.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/types.go deleted file mode 100644 index 1941f3f35ee3..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding/types.go +++ /dev/null @@ -1,2155 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package encoding - -import ( - "encoding/binary" - 
"encoding/hex" - "errors" - "fmt" - "math" - "reflect" - "time" - "unicode/utf8" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer" -) - -type AMQPType uint8 - -// Type codes -const ( - TypeCodeNull AMQPType = 0x40 - - // Bool - TypeCodeBool AMQPType = 0x56 // boolean with the octet 0x00 being false and octet 0x01 being true - TypeCodeBoolTrue AMQPType = 0x41 - TypeCodeBoolFalse AMQPType = 0x42 - - // Unsigned - TypeCodeUbyte AMQPType = 0x50 // 8-bit unsigned integer (1) - TypeCodeUshort AMQPType = 0x60 // 16-bit unsigned integer in network byte order (2) - TypeCodeUint AMQPType = 0x70 // 32-bit unsigned integer in network byte order (4) - TypeCodeSmallUint AMQPType = 0x52 // unsigned integer value in the range 0 to 255 inclusive (1) - TypeCodeUint0 AMQPType = 0x43 // the uint value 0 (0) - TypeCodeUlong AMQPType = 0x80 // 64-bit unsigned integer in network byte order (8) - TypeCodeSmallUlong AMQPType = 0x53 // unsigned long value in the range 0 to 255 inclusive (1) - TypeCodeUlong0 AMQPType = 0x44 // the ulong value 0 (0) - - // Signed - TypeCodeByte AMQPType = 0x51 // 8-bit two's-complement integer (1) - TypeCodeShort AMQPType = 0x61 // 16-bit two's-complement integer in network byte order (2) - TypeCodeInt AMQPType = 0x71 // 32-bit two's-complement integer in network byte order (4) - TypeCodeSmallint AMQPType = 0x54 // 8-bit two's-complement integer (1) - TypeCodeLong AMQPType = 0x81 // 64-bit two's-complement integer in network byte order (8) - TypeCodeSmalllong AMQPType = 0x55 // 8-bit two's-complement integer - - // Decimal - TypeCodeFloat AMQPType = 0x72 // IEEE 754-2008 binary32 (4) - TypeCodeDouble AMQPType = 0x82 // IEEE 754-2008 binary64 (8) - TypeCodeDecimal32 AMQPType = 0x74 // IEEE 754-2008 decimal32 using the Binary Integer Decimal encoding (4) - TypeCodeDecimal64 AMQPType = 0x84 // IEEE 754-2008 decimal64 using the Binary Integer Decimal encoding (8) - TypeCodeDecimal128 AMQPType = 0x94 // 
IEEE 754-2008 decimal128 using the Binary Integer Decimal encoding (16) - - // Other - TypeCodeChar AMQPType = 0x73 // a UTF-32BE encoded Unicode character (4) - TypeCodeTimestamp AMQPType = 0x83 // 64-bit two's-complement integer representing milliseconds since the unix epoch - TypeCodeUUID AMQPType = 0x98 // UUID as defined in section 4.1.2 of RFC-4122 - - // Variable Length - TypeCodeVbin8 AMQPType = 0xa0 // up to 2^8 - 1 octets of binary data (1 + variable) - TypeCodeVbin32 AMQPType = 0xb0 // up to 2^32 - 1 octets of binary data (4 + variable) - TypeCodeStr8 AMQPType = 0xa1 // up to 2^8 - 1 octets worth of UTF-8 Unicode (with no byte order mark) (1 + variable) - TypeCodeStr32 AMQPType = 0xb1 // up to 2^32 - 1 octets worth of UTF-8 Unicode (with no byte order mark) (4 +variable) - TypeCodeSym8 AMQPType = 0xa3 // up to 2^8 - 1 seven bit ASCII characters representing a symbolic value (1 + variable) - TypeCodeSym32 AMQPType = 0xb3 // up to 2^32 - 1 seven bit ASCII characters representing a symbolic value (4 + variable) - - // Compound - TypeCodeList0 AMQPType = 0x45 // the empty list (i.e. 
the list with no elements) (0) - TypeCodeList8 AMQPType = 0xc0 // up to 2^8 - 1 list elements with total size less than 2^8 octets (1 + compound) - TypeCodeList32 AMQPType = 0xd0 // up to 2^32 - 1 list elements with total size less than 2^32 octets (4 + compound) - TypeCodeMap8 AMQPType = 0xc1 // up to 2^8 - 1 octets of encoded map data (1 + compound) - TypeCodeMap32 AMQPType = 0xd1 // up to 2^32 - 1 octets of encoded map data (4 + compound) - TypeCodeArray8 AMQPType = 0xe0 // up to 2^8 - 1 array elements with total size less than 2^8 octets (1 + array) - TypeCodeArray32 AMQPType = 0xf0 // up to 2^32 - 1 array elements with total size less than 2^32 octets (4 + array) - - // Composites - TypeCodeOpen AMQPType = 0x10 - TypeCodeBegin AMQPType = 0x11 - TypeCodeAttach AMQPType = 0x12 - TypeCodeFlow AMQPType = 0x13 - TypeCodeTransfer AMQPType = 0x14 - TypeCodeDisposition AMQPType = 0x15 - TypeCodeDetach AMQPType = 0x16 - TypeCodeEnd AMQPType = 0x17 - TypeCodeClose AMQPType = 0x18 - - TypeCodeSource AMQPType = 0x28 - TypeCodeTarget AMQPType = 0x29 - TypeCodeError AMQPType = 0x1d - - TypeCodeMessageHeader AMQPType = 0x70 - TypeCodeDeliveryAnnotations AMQPType = 0x71 - TypeCodeMessageAnnotations AMQPType = 0x72 - TypeCodeMessageProperties AMQPType = 0x73 - TypeCodeApplicationProperties AMQPType = 0x74 - TypeCodeApplicationData AMQPType = 0x75 - TypeCodeAMQPSequence AMQPType = 0x76 - TypeCodeAMQPValue AMQPType = 0x77 - TypeCodeFooter AMQPType = 0x78 - - TypeCodeStateReceived AMQPType = 0x23 - TypeCodeStateAccepted AMQPType = 0x24 - TypeCodeStateRejected AMQPType = 0x25 - TypeCodeStateReleased AMQPType = 0x26 - TypeCodeStateModified AMQPType = 0x27 - - TypeCodeSASLMechanism AMQPType = 0x40 - TypeCodeSASLInit AMQPType = 0x41 - TypeCodeSASLChallenge AMQPType = 0x42 - TypeCodeSASLResponse AMQPType = 0x43 - TypeCodeSASLOutcome AMQPType = 0x44 - - TypeCodeDeleteOnClose AMQPType = 0x2b - TypeCodeDeleteOnNoLinks AMQPType = 0x2c - TypeCodeDeleteOnNoMessages AMQPType = 0x2d - 
TypeCodeDeleteOnNoLinksOrMessages AMQPType = 0x2e -) - -// Durability Policies -const ( - // No terminus state is retained durably. - DurabilityNone Durability = 0 - - // Only the existence and configuration of the terminus is - // retained durably. - DurabilityConfiguration Durability = 1 - - // In addition to the existence and configuration of the - // terminus, the unsettled state for durable messages is - // retained durably. - DurabilityUnsettledState Durability = 2 -) - -// Durability specifies the durability of a link. -type Durability uint32 - -func (d *Durability) String() string { - if d == nil { - return "" - } - - switch *d { - case DurabilityNone: - return "none" - case DurabilityConfiguration: - return "configuration" - case DurabilityUnsettledState: - return "unsettled-state" - default: - return fmt.Sprintf("unknown durability %d", *d) - } -} - -func (d Durability) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, uint32(d)) -} - -func (d *Durability) Unmarshal(r *buffer.Buffer) error { - return Unmarshal(r, (*uint32)(d)) -} - -// Expiry Policies -const ( - // The expiry timer starts when terminus is detached. - ExpiryLinkDetach ExpiryPolicy = "link-detach" - - // The expiry timer starts when the most recently - // associated session is ended. - ExpirySessionEnd ExpiryPolicy = "session-end" - - // The expiry timer starts when most recently associated - // connection is closed. - ExpiryConnectionClose ExpiryPolicy = "connection-close" - - // The terminus never expires. - ExpiryNever ExpiryPolicy = "never" -) - -// ExpiryPolicy specifies when the expiry timer of a terminus -// starts counting down from the timeout value. -// -// If the link is subsequently re-attached before the terminus is expired, -// then the count down is aborted. If the conditions for the -// terminus-expiry-policy are subsequently re-met, the expiry timer restarts -// from its originally configured timeout value. 
-type ExpiryPolicy Symbol - -func ValidateExpiryPolicy(e ExpiryPolicy) error { - switch e { - case ExpiryLinkDetach, - ExpirySessionEnd, - ExpiryConnectionClose, - ExpiryNever: - return nil - default: - return fmt.Errorf("unknown expiry-policy %q", e) - } -} - -func (e ExpiryPolicy) Marshal(wr *buffer.Buffer) error { - return Symbol(e).Marshal(wr) -} - -func (e *ExpiryPolicy) Unmarshal(r *buffer.Buffer) error { - err := Unmarshal(r, (*Symbol)(e)) - if err != nil { - return err - } - return ValidateExpiryPolicy(*e) -} - -func (e *ExpiryPolicy) String() string { - if e == nil { - return "" - } - return string(*e) -} - -// Sender Settlement Modes -const ( - // Sender will send all deliveries initially unsettled to the receiver. - SenderSettleModeUnsettled SenderSettleMode = 0 - - // Sender will send all deliveries settled to the receiver. - SenderSettleModeSettled SenderSettleMode = 1 - - // Sender MAY send a mixture of settled and unsettled deliveries to the receiver. - SenderSettleModeMixed SenderSettleMode = 2 -) - -// SenderSettleMode specifies how the sender will settle messages. -type SenderSettleMode uint8 - -func (m SenderSettleMode) Ptr() *SenderSettleMode { - return &m -} - -func (m *SenderSettleMode) String() string { - if m == nil { - return "" - } - - switch *m { - case SenderSettleModeUnsettled: - return "unsettled" - - case SenderSettleModeSettled: - return "settled" - - case SenderSettleModeMixed: - return "mixed" - - default: - return fmt.Sprintf("unknown sender mode %d", uint8(*m)) - } -} - -func (m SenderSettleMode) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, uint8(m)) -} - -func (m *SenderSettleMode) Unmarshal(r *buffer.Buffer) error { - n, err := ReadUbyte(r) - *m = SenderSettleMode(n) - return err -} - -// Receiver Settlement Modes -const ( - // Receiver will spontaneously settle all incoming transfers. 
- ReceiverSettleModeFirst ReceiverSettleMode = 0 - - // Receiver will only settle after sending the disposition to the - // sender and receiving a disposition indicating settlement of - // the delivery from the sender. - ReceiverSettleModeSecond ReceiverSettleMode = 1 -) - -// ReceiverSettleMode specifies how the receiver will settle messages. -type ReceiverSettleMode uint8 - -func (m ReceiverSettleMode) Ptr() *ReceiverSettleMode { - return &m -} - -func (m *ReceiverSettleMode) String() string { - if m == nil { - return "" - } - - switch *m { - case ReceiverSettleModeFirst: - return "first" - - case ReceiverSettleModeSecond: - return "second" - - default: - return fmt.Sprintf("unknown receiver mode %d", uint8(*m)) - } -} - -func (m ReceiverSettleMode) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, uint8(m)) -} - -func (m *ReceiverSettleMode) Unmarshal(r *buffer.Buffer) error { - n, err := ReadUbyte(r) - *m = ReceiverSettleMode(n) - return err -} - -type Role bool - -const ( - RoleSender Role = false - RoleReceiver Role = true -) - -func (rl Role) String() string { - if rl { - return "Receiver" - } - return "Sender" -} - -func (rl *Role) Unmarshal(r *buffer.Buffer) error { - b, err := readBool(r) - *rl = Role(b) - return err -} - -func (rl Role) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, (bool)(rl)) -} - -type SASLCode uint8 - -// SASL Codes -const ( - CodeSASLOK SASLCode = iota // Connection authentication succeeded. - CodeSASLAuth // Connection authentication failed due to an unspecified problem with the supplied credentials. - CodeSASLSysPerm // Connection authentication failed due to a system error that is unlikely to be corrected without intervention. -) - -func (s SASLCode) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, uint8(s)) -} - -func (s *SASLCode) Unmarshal(r *buffer.Buffer) error { - n, err := ReadUbyte(r) - *s = SASLCode(n) - return err -} - -// DeliveryState encapsulates the various concrete delivery states. 
-// http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#section-delivery-state -// TODO: http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-transactions-v1.0-os.html#type-declared -type DeliveryState interface { - deliveryState() // marker method -} - -type Unsettled map[string]DeliveryState - -func (u Unsettled) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, u) -} - -func (u *Unsettled) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - m := make(Unsettled, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadString(r) - if err != nil { - return err - } - var value DeliveryState - err = Unmarshal(r, &value) - if err != nil { - return err - } - m[key] = value - } - *u = m - return nil -} - -type Filter map[Symbol]*DescribedType - -func (f Filter) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, f) -} - -func (f *Filter) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - m := make(Filter, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadString(r) - if err != nil { - return err - } - var value DescribedType - err = Unmarshal(r, &value) - if err != nil { - return err - } - m[Symbol(key)] = &value - } - *f = m - return nil -} - -// peekMessageType reads the message type without -// modifying any data. 
-func PeekMessageType(buf []byte) (uint8, uint8, error) { - if len(buf) < 3 { - return 0, 0, errors.New("invalid message") - } - - if buf[0] != 0 { - return 0, 0, fmt.Errorf("invalid composite header %02x", buf[0]) - } - - // copied from readUlong to avoid allocations - t := AMQPType(buf[1]) - if t == TypeCodeUlong0 { - return 0, 2, nil - } - - if t == TypeCodeSmallUlong { - if len(buf[2:]) == 0 { - return 0, 0, errors.New("invalid ulong") - } - return buf[2], 3, nil - } - - if t != TypeCodeUlong { - return 0, 0, fmt.Errorf("invalid type for uint32 %02x", t) - } - - if len(buf[2:]) < 8 { - return 0, 0, errors.New("invalid ulong") - } - v := binary.BigEndian.Uint64(buf[2:10]) - - return uint8(v), 10, nil -} - -func tryReadNull(r *buffer.Buffer) bool { - if r.Len() > 0 && AMQPType(r.Bytes()[0]) == TypeCodeNull { - r.Skip(1) - return true - } - return false -} - -// Annotations keys must be of type string, int, or int64. -// -// String keys are encoded as AMQP Symbols. -type Annotations map[any]any - -func (a Annotations) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, a) -} - -func (a *Annotations) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - m := make(Annotations, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadAny(r) - if err != nil { - return err - } - value, err := ReadAny(r) - if err != nil { - return err - } - m[key] = value - } - *a = m - return nil -} - -// ErrCond is one of the error conditions defined in the AMQP spec. -type ErrCond string - -func (ec ErrCond) Marshal(wr *buffer.Buffer) error { - return (Symbol)(ec).Marshal(wr) -} - -func (ec *ErrCond) Unmarshal(r *buffer.Buffer) error { - s, err := ReadString(r) - *ec = ErrCond(s) - return err -} - -/* - - - - - - -*/ - -// Error is an AMQP error. -type Error struct { - // A symbolic value indicating the error condition. 
- Condition ErrCond - - // descriptive text about the error condition - // - // This text supplies any supplementary details not indicated by the condition field. - // This text can be logged as an aid to resolving issues. - Description string - - // map carrying information about the error condition - Info map[string]any -} - -func (e *Error) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeError, []MarshalField{ - {Value: &e.Condition, Omit: false}, - {Value: &e.Description, Omit: e.Description == ""}, - {Value: e.Info, Omit: len(e.Info) == 0}, - }) -} - -func (e *Error) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeError, []UnmarshalField{ - {Field: &e.Condition, HandleNull: func() error { return errors.New("Error.Condition is required") }}, - {Field: &e.Description}, - {Field: &e.Info}, - }...) -} - -func (e *Error) String() string { - if e == nil { - return "*Error(nil)" - } - return fmt.Sprintf("*Error{Condition: %s, Description: %s, Info: %v}", - e.Condition, - e.Description, - e.Info, - ) -} - -func (e *Error) Error() string { - return e.String() -} - -/* - - - - - -*/ - -type StateReceived struct { - // When sent by the sender this indicates the first section of the message - // (with section-number 0 being the first section) for which data can be resent. - // Data from sections prior to the given section cannot be retransmitted for - // this delivery. - // - // When sent by the receiver this indicates the first section of the message - // for which all data might not yet have been received. - SectionNumber uint32 - - // When sent by the sender this indicates the first byte of the encoded section - // data of the section given by section-number for which data can be resent - // (with section-offset 0 being the first byte). Bytes from the same section - // prior to the given offset section cannot be retransmitted for this delivery. 
- // - // When sent by the receiver this indicates the first byte of the given section - // which has not yet been received. Note that if a receiver has received all of - // section number X (which contains N bytes of data), but none of section number - // X + 1, then it can indicate this by sending either Received(section-number=X, - // section-offset=N) or Received(section-number=X+1, section-offset=0). The state - // Received(section-number=0, section-offset=0) indicates that no message data - // at all has been transferred. - SectionOffset uint64 -} - -func (sr *StateReceived) deliveryState() {} - -func (sr *StateReceived) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateReceived, []MarshalField{ - {Value: &sr.SectionNumber, Omit: false}, - {Value: &sr.SectionOffset, Omit: false}, - }) -} - -func (sr *StateReceived) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateReceived, []UnmarshalField{ - {Field: &sr.SectionNumber, HandleNull: func() error { return errors.New("StateReceiver.SectionNumber is required") }}, - {Field: &sr.SectionOffset, HandleNull: func() error { return errors.New("StateReceiver.SectionOffset is required") }}, - }...) 
-} - -/* - - - -*/ - -type StateAccepted struct{} - -func (sr *StateAccepted) deliveryState() {} - -func (sa *StateAccepted) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateAccepted, nil) -} - -func (sa *StateAccepted) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateAccepted) -} - -func (sa *StateAccepted) String() string { - return "Accepted" -} - -/* - - - - -*/ - -type StateRejected struct { - Error *Error -} - -func (sr *StateRejected) deliveryState() {} - -func (sr *StateRejected) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateRejected, []MarshalField{ - {Value: sr.Error, Omit: sr.Error == nil}, - }) -} - -func (sr *StateRejected) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateRejected, - UnmarshalField{Field: &sr.Error}, - ) -} - -func (sr *StateRejected) String() string { - return fmt.Sprintf("Rejected{Error: %v}", sr.Error) -} - -/* - - - -*/ - -type StateReleased struct{} - -func (sr *StateReleased) deliveryState() {} - -func (sr *StateReleased) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateReleased, nil) -} - -func (sr *StateReleased) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateReleased) -} - -func (sr *StateReleased) String() string { - return "Released" -} - -/* - - - - - - -*/ - -type StateModified struct { - // count the transfer as an unsuccessful delivery attempt - // - // If the delivery-failed flag is set, any messages modified - // MUST have their delivery-count incremented. - DeliveryFailed bool - - // prevent redelivery - // - // If the undeliverable-here is set, then any messages released MUST NOT - // be redelivered to the modifying link endpoint. - UndeliverableHere bool - - // message attributes - // Map containing attributes to combine with the existing message-annotations - // held in the message's header section. 
Where the existing message-annotations - // of the message contain an entry with the same key as an entry in this field, - // the value in this field associated with that key replaces the one in the - // existing headers; where the existing message-annotations has no such value, - // the value in this map is added. - MessageAnnotations Annotations -} - -func (sr *StateModified) deliveryState() {} - -func (sm *StateModified) Marshal(wr *buffer.Buffer) error { - return MarshalComposite(wr, TypeCodeStateModified, []MarshalField{ - {Value: &sm.DeliveryFailed, Omit: !sm.DeliveryFailed}, - {Value: &sm.UndeliverableHere, Omit: !sm.UndeliverableHere}, - {Value: sm.MessageAnnotations, Omit: sm.MessageAnnotations == nil}, - }) -} - -func (sm *StateModified) Unmarshal(r *buffer.Buffer) error { - return UnmarshalComposite(r, TypeCodeStateModified, []UnmarshalField{ - {Field: &sm.DeliveryFailed}, - {Field: &sm.UndeliverableHere}, - {Field: &sm.MessageAnnotations}, - }...) -} - -func (sm *StateModified) String() string { - return fmt.Sprintf("Modified{DeliveryFailed: %t, UndeliverableHere: %t, MessageAnnotations: %v}", sm.DeliveryFailed, sm.UndeliverableHere, sm.MessageAnnotations) -} - -// symbol is an AMQP symbolic string. 
-type Symbol string - -func (s Symbol) Marshal(wr *buffer.Buffer) error { - l := len(s) - switch { - // Sym8 - case l < 256: - wr.Append([]byte{ - byte(TypeCodeSym8), - byte(l), - }) - wr.AppendString(string(s)) - - // Sym32 - case uint(l) < math.MaxUint32: - wr.AppendByte(uint8(TypeCodeSym32)) - wr.AppendUint32(uint32(l)) - wr.AppendString(string(s)) - default: - return errors.New("too long") - } - return nil -} - -type Milliseconds time.Duration - -func (m Milliseconds) Marshal(wr *buffer.Buffer) error { - writeUint32(wr, uint32(m/Milliseconds(time.Millisecond))) - return nil -} - -func (m *Milliseconds) Unmarshal(r *buffer.Buffer) error { - n, err := readUint(r) - *m = Milliseconds(time.Duration(n) * time.Millisecond) - return err -} - -// mapAnyAny is used to decode AMQP maps who's keys are undefined or -// inconsistently typed. -type mapAnyAny map[any]any - -func (m mapAnyAny) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, map[any]any(m)) -} - -func (m *mapAnyAny) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - mm := make(mapAnyAny, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadAny(r) - if err != nil { - return err - } - value, err := ReadAny(r) - if err != nil { - return err - } - - // https://golang.org/ref/spec#Map_types: - // The comparison operators == and != must be fully defined - // for operands of the key type; thus the key type must not - // be a function, map, or slice. 
- switch reflect.ValueOf(key).Kind() { - case reflect.Slice, reflect.Func, reflect.Map: - return errors.New("invalid map key") - } - - mm[key] = value - } - *m = mm - return nil -} - -// mapStringAny is used to decode AMQP maps that have string keys -type mapStringAny map[string]any - -func (m mapStringAny) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, map[string]any(m)) -} - -func (m *mapStringAny) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - mm := make(mapStringAny, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadString(r) - if err != nil { - return err - } - value, err := ReadAny(r) - if err != nil { - return err - } - mm[key] = value - } - *m = mm - - return nil -} - -// mapStringAny is used to decode AMQP maps that have Symbol keys -type mapSymbolAny map[Symbol]any - -func (m mapSymbolAny) Marshal(wr *buffer.Buffer) error { - return writeMap(wr, map[Symbol]any(m)) -} - -func (m *mapSymbolAny) Unmarshal(r *buffer.Buffer) error { - count, err := readMapHeader(r) - if err != nil { - return err - } - - mm := make(mapSymbolAny, count/2) - for i := uint32(0); i < count; i += 2 { - key, err := ReadString(r) - if err != nil { - return err - } - value, err := ReadAny(r) - if err != nil { - return err - } - mm[Symbol(key)] = value - } - *m = mm - return nil -} - -// UUID is a 128 bit identifier as defined in RFC 4122. -type UUID [16]byte - -// String returns the hex encoded representation described in RFC 4122, Section 3. 
-func (u UUID) String() string { - var buf [36]byte - hex.Encode(buf[:8], u[:4]) - buf[8] = '-' - hex.Encode(buf[9:13], u[4:6]) - buf[13] = '-' - hex.Encode(buf[14:18], u[6:8]) - buf[18] = '-' - hex.Encode(buf[19:23], u[8:10]) - buf[23] = '-' - hex.Encode(buf[24:], u[10:]) - return string(buf[:]) -} - -func (u UUID) Marshal(wr *buffer.Buffer) error { - wr.AppendByte(byte(TypeCodeUUID)) - wr.Append(u[:]) - return nil -} - -func (u *UUID) Unmarshal(r *buffer.Buffer) error { - un, err := readUUID(r) - *u = un - return err -} - -type LifetimePolicy uint8 - -const ( - DeleteOnClose = LifetimePolicy(TypeCodeDeleteOnClose) - DeleteOnNoLinks = LifetimePolicy(TypeCodeDeleteOnNoLinks) - DeleteOnNoMessages = LifetimePolicy(TypeCodeDeleteOnNoMessages) - DeleteOnNoLinksOrMessages = LifetimePolicy(TypeCodeDeleteOnNoLinksOrMessages) -) - -func (p LifetimePolicy) Marshal(wr *buffer.Buffer) error { - wr.Append([]byte{ - 0x0, - byte(TypeCodeSmallUlong), - byte(p), - byte(TypeCodeList0), - }) - return nil -} - -func (p *LifetimePolicy) Unmarshal(r *buffer.Buffer) error { - typ, fields, err := readCompositeHeader(r) - if err != nil { - return err - } - if fields != 0 { - return fmt.Errorf("invalid size %d for lifetime-policy", fields) - } - *p = LifetimePolicy(typ) - return nil -} - -type DescribedType struct { - Descriptor any - Value any -} - -func (t DescribedType) Marshal(wr *buffer.Buffer) error { - wr.AppendByte(0x0) // descriptor constructor - err := Marshal(wr, t.Descriptor) - if err != nil { - return err - } - return Marshal(wr, t.Value) -} - -func (t *DescribedType) Unmarshal(r *buffer.Buffer) error { - b, err := r.ReadByte() - if err != nil { - return err - } - - if b != 0x0 { - return fmt.Errorf("invalid described type header %02x", b) - } - - err = Unmarshal(r, &t.Descriptor) - if err != nil { - return err - } - return Unmarshal(r, &t.Value) -} - -func (t DescribedType) String() string { - return fmt.Sprintf("DescribedType{descriptor: %v, value: %v}", - t.Descriptor, - 
t.Value, - ) -} - -// SLICES - -// ArrayUByte allows encoding []uint8/[]byte as an array -// rather than binary data. -type ArrayUByte []uint8 - -func (a ArrayUByte) Marshal(wr *buffer.Buffer) error { - const typeSize = 1 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeUbyte) - wr.Append(a) - - return nil -} - -func (a *ArrayUByte) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeUbyte { - return fmt.Errorf("invalid type for []uint16 %02x", type_) - } - - buf, ok := r.Next(length) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - *a = append([]byte(nil), buf...) - - return nil -} - -type arrayInt8 []int8 - -func (a arrayInt8) Marshal(wr *buffer.Buffer) error { - const typeSize = 1 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeByte) - - for _, value := range a { - wr.AppendByte(uint8(value)) - } - - return nil -} - -func (a *arrayInt8) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeByte { - return fmt.Errorf("invalid type for []uint16 %02x", type_) - } - - buf, ok := r.Next(length) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]int8, length) - } else { - aa = aa[:length] - } - - for i, value := range buf { - aa[i] = int8(value) - } - - *a = aa - return nil -} - -type arrayUint16 []uint16 - -func (a arrayUint16) Marshal(wr *buffer.Buffer) error { - const typeSize = 2 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeUshort) - - for _, element := range a { - wr.AppendUint16(element) - } - - return nil -} - -func (a *arrayUint16) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != 
nil { - return err - } - if type_ != TypeCodeUshort { - return fmt.Errorf("invalid type for []uint16 %02x", type_) - } - - const typeSize = 2 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]uint16, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = binary.BigEndian.Uint16(buf[bufIdx:]) - bufIdx += 2 - } - - *a = aa - return nil -} - -type arrayInt16 []int16 - -func (a arrayInt16) Marshal(wr *buffer.Buffer) error { - const typeSize = 2 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeShort) - - for _, element := range a { - wr.AppendUint16(uint16(element)) - } - - return nil -} - -func (a *arrayInt16) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeShort { - return fmt.Errorf("invalid type for []uint16 %02x", type_) - } - - const typeSize = 2 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]int16, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = int16(binary.BigEndian.Uint16(buf[bufIdx : bufIdx+2])) - bufIdx += 2 - } - - *a = aa - return nil -} - -type arrayUint32 []uint32 - -func (a arrayUint32) Marshal(wr *buffer.Buffer) error { - var ( - typeSize = 1 - TypeCode = TypeCodeSmallUint - ) - for _, n := range a { - if n > math.MaxUint8 { - typeSize = 4 - TypeCode = TypeCodeUint - break - } - } - - writeArrayHeader(wr, len(a), typeSize, TypeCode) - - if TypeCode == TypeCodeUint { - for _, element := range a { - wr.AppendUint32(element) - } - } else { - for _, element := range a { - wr.AppendByte(byte(element)) - } - } - - return nil -} - -func (a *arrayUint32) Unmarshal(r *buffer.Buffer) error { - length, 
err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeUint0: - if int64(cap(aa)) < length { - aa = make([]uint32, length) - } else { - aa = aa[:length] - for i := range aa { - aa[i] = 0 - } - } - case TypeCodeSmallUint: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]uint32, length) - } else { - aa = aa[:length] - } - - for i, n := range buf { - aa[i] = uint32(n) - } - case TypeCodeUint: - const typeSize = 4 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - if int64(cap(aa)) < length { - aa = make([]uint32, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = binary.BigEndian.Uint32(buf[bufIdx : bufIdx+4]) - bufIdx += 4 - } - default: - return fmt.Errorf("invalid type for []uint32 %02x", type_) - } - - *a = aa - return nil -} - -type arrayInt32 []int32 - -func (a arrayInt32) Marshal(wr *buffer.Buffer) error { - var ( - typeSize = 1 - TypeCode = TypeCodeSmallint - ) - for _, n := range a { - if n > math.MaxInt8 { - typeSize = 4 - TypeCode = TypeCodeInt - break - } - } - - writeArrayHeader(wr, len(a), typeSize, TypeCode) - - if TypeCode == TypeCodeInt { - for _, element := range a { - wr.AppendUint32(uint32(element)) - } - } else { - for _, element := range a { - wr.AppendByte(byte(element)) - } - } - - return nil -} - -func (a *arrayInt32) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeSmallint: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]int32, length) - } else { - aa = aa[:length] - } - - for i, n := range buf { - 
aa[i] = int32(int8(n)) - } - case TypeCodeInt: - const typeSize = 4 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - if int64(cap(aa)) < length { - aa = make([]int32, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = int32(binary.BigEndian.Uint32(buf[bufIdx:])) - bufIdx += 4 - } - default: - return fmt.Errorf("invalid type for []int32 %02x", type_) - } - - *a = aa - return nil -} - -type arrayUint64 []uint64 - -func (a arrayUint64) Marshal(wr *buffer.Buffer) error { - var ( - typeSize = 1 - TypeCode = TypeCodeSmallUlong - ) - for _, n := range a { - if n > math.MaxUint8 { - typeSize = 8 - TypeCode = TypeCodeUlong - break - } - } - - writeArrayHeader(wr, len(a), typeSize, TypeCode) - - if TypeCode == TypeCodeUlong { - for _, element := range a { - wr.AppendUint64(element) - } - } else { - for _, element := range a { - wr.AppendByte(byte(element)) - } - } - - return nil -} - -func (a *arrayUint64) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeUlong0: - if int64(cap(aa)) < length { - aa = make([]uint64, length) - } else { - aa = aa[:length] - for i := range aa { - aa[i] = 0 - } - } - case TypeCodeSmallUlong: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]uint64, length) - } else { - aa = aa[:length] - } - - for i, n := range buf { - aa[i] = uint64(n) - } - case TypeCodeUlong: - const typeSize = 8 - buf, ok := r.Next(length * typeSize) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]uint64, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = binary.BigEndian.Uint64(buf[bufIdx : bufIdx+8]) - bufIdx += 8 - } - default: - return 
fmt.Errorf("invalid type for []uint64 %02x", type_) - } - - *a = aa - return nil -} - -type arrayInt64 []int64 - -func (a arrayInt64) Marshal(wr *buffer.Buffer) error { - var ( - typeSize = 1 - TypeCode = TypeCodeSmalllong - ) - for _, n := range a { - if n > math.MaxInt8 { - typeSize = 8 - TypeCode = TypeCodeLong - break - } - } - - writeArrayHeader(wr, len(a), typeSize, TypeCode) - - if TypeCode == TypeCodeLong { - for _, element := range a { - wr.AppendUint64(uint64(element)) - } - } else { - for _, element := range a { - wr.AppendByte(byte(element)) - } - } - - return nil -} - -func (a *arrayInt64) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeSmalllong: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]int64, length) - } else { - aa = aa[:length] - } - - for i, n := range buf { - aa[i] = int64(int8(n)) - } - case TypeCodeLong: - const typeSize = 8 - buf, ok := r.Next(length * typeSize) - if !ok { - return errors.New("invalid length") - } - - if int64(cap(aa)) < length { - aa = make([]int64, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - aa[i] = int64(binary.BigEndian.Uint64(buf[bufIdx:])) - bufIdx += 8 - } - default: - return fmt.Errorf("invalid type for []uint64 %02x", type_) - } - - *a = aa - return nil -} - -type arrayFloat []float32 - -func (a arrayFloat) Marshal(wr *buffer.Buffer) error { - const typeSize = 4 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeFloat) - - for _, element := range a { - wr.AppendUint32(math.Float32bits(element)) - } - - return nil -} - -func (a *arrayFloat) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ 
!= TypeCodeFloat { - return fmt.Errorf("invalid type for []float32 %02x", type_) - } - - const typeSize = 4 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]float32, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - bits := binary.BigEndian.Uint32(buf[bufIdx:]) - aa[i] = math.Float32frombits(bits) - bufIdx += typeSize - } - - *a = aa - return nil -} - -type arrayDouble []float64 - -func (a arrayDouble) Marshal(wr *buffer.Buffer) error { - const typeSize = 8 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeDouble) - - for _, element := range a { - wr.AppendUint64(math.Float64bits(element)) - } - - return nil -} - -func (a *arrayDouble) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeDouble { - return fmt.Errorf("invalid type for []float64 %02x", type_) - } - - const typeSize = 8 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]float64, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - bits := binary.BigEndian.Uint64(buf[bufIdx:]) - aa[i] = math.Float64frombits(bits) - bufIdx += typeSize - } - - *a = aa - return nil -} - -type arrayBool []bool - -func (a arrayBool) Marshal(wr *buffer.Buffer) error { - const typeSize = 1 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeBool) - - for _, element := range a { - value := byte(0) - if element { - value = 1 - } - wr.AppendByte(value) - } - - return nil -} - -func (a *arrayBool) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]bool, length) - } else { - aa = 
aa[:length] - } - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeBool: - buf, ok := r.Next(length) - if !ok { - return errors.New("invalid length") - } - - for i, value := range buf { - if value == 0 { - aa[i] = false - } else { - aa[i] = true - } - } - - case TypeCodeBoolTrue: - for i := range aa { - aa[i] = true - } - case TypeCodeBoolFalse: - for i := range aa { - aa[i] = false - } - default: - return fmt.Errorf("invalid type for []bool %02x", type_) - } - - *a = aa - return nil -} - -type arrayString []string - -func (a arrayString) Marshal(wr *buffer.Buffer) error { - var ( - elementType = TypeCodeStr8 - elementsSizeTotal int - ) - for _, element := range a { - if !utf8.ValidString(element) { - return errors.New("not a valid UTF-8 string") - } - - elementsSizeTotal += len(element) - - if len(element) > math.MaxUint8 { - elementType = TypeCodeStr32 - } - } - - writeVariableArrayHeader(wr, len(a), elementsSizeTotal, elementType) - - if elementType == TypeCodeStr32 { - for _, element := range a { - wr.AppendUint32(uint32(len(element))) - wr.AppendString(element) - } - } else { - for _, element := range a { - wr.AppendByte(byte(len(element))) - wr.AppendString(element) - } - } - - return nil -} - -func (a *arrayString) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - const typeSize = 2 // assume all strings are at least 2 bytes - if length*typeSize > int64(r.Len()) { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]string, length) - } else { - aa = aa[:length] - } - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeStr8: - for i := range aa { - size, err := r.ReadByte() - if err != nil { - return err - } - - buf, ok := r.Next(int64(size)) - if !ok { - return errors.New("invalid length") - } - - aa[i] = string(buf) - } - case TypeCodeStr32: - for 
i := range aa { - buf, ok := r.Next(4) - if !ok { - return errors.New("invalid length") - } - size := int64(binary.BigEndian.Uint32(buf)) - - buf, ok = r.Next(size) - if !ok { - return errors.New("invalid length") - } - aa[i] = string(buf) - } - default: - return fmt.Errorf("invalid type for []string %02x", type_) - } - - *a = aa - return nil -} - -type arraySymbol []Symbol - -func (a arraySymbol) Marshal(wr *buffer.Buffer) error { - var ( - elementType = TypeCodeSym8 - elementsSizeTotal int - ) - for _, element := range a { - elementsSizeTotal += len(element) - - if len(element) > math.MaxUint8 { - elementType = TypeCodeSym32 - } - } - - writeVariableArrayHeader(wr, len(a), elementsSizeTotal, elementType) - - if elementType == TypeCodeSym32 { - for _, element := range a { - wr.AppendUint32(uint32(len(element))) - wr.AppendString(string(element)) - } - } else { - for _, element := range a { - wr.AppendByte(byte(len(element))) - wr.AppendString(string(element)) - } - } - - return nil -} - -func (a *arraySymbol) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - const typeSize = 2 // assume all symbols are at least 2 bytes - if length*typeSize > int64(r.Len()) { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]Symbol, length) - } else { - aa = aa[:length] - } - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeSym8: - for i := range aa { - size, err := r.ReadByte() - if err != nil { - return err - } - - buf, ok := r.Next(int64(size)) - if !ok { - return errors.New("invalid length") - } - aa[i] = Symbol(buf) - } - case TypeCodeSym32: - for i := range aa { - buf, ok := r.Next(4) - if !ok { - return errors.New("invalid length") - } - size := int64(binary.BigEndian.Uint32(buf)) - - buf, ok = r.Next(size) - if !ok { - return errors.New("invalid length") - } - aa[i] = Symbol(buf) - } - default: - return 
fmt.Errorf("invalid type for []Symbol %02x", type_) - } - - *a = aa - return nil -} - -type arrayBinary [][]byte - -func (a arrayBinary) Marshal(wr *buffer.Buffer) error { - var ( - elementType = TypeCodeVbin8 - elementsSizeTotal int - ) - for _, element := range a { - elementsSizeTotal += len(element) - - if len(element) > math.MaxUint8 { - elementType = TypeCodeVbin32 - } - } - - writeVariableArrayHeader(wr, len(a), elementsSizeTotal, elementType) - - if elementType == TypeCodeVbin32 { - for _, element := range a { - wr.AppendUint32(uint32(len(element))) - wr.Append(element) - } - } else { - for _, element := range a { - wr.AppendByte(byte(len(element))) - wr.Append(element) - } - } - - return nil -} - -func (a *arrayBinary) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - const typeSize = 2 // assume all binary is at least 2 bytes - if length*typeSize > int64(r.Len()) { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([][]byte, length) - } else { - aa = aa[:length] - } - - type_, err := readType(r) - if err != nil { - return err - } - switch type_ { - case TypeCodeVbin8: - for i := range aa { - size, err := r.ReadByte() - if err != nil { - return err - } - - buf, ok := r.Next(int64(size)) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - aa[i] = append([]byte(nil), buf...) - } - case TypeCodeVbin32: - for i := range aa { - buf, ok := r.Next(4) - if !ok { - return errors.New("invalid length") - } - size := binary.BigEndian.Uint32(buf) - - buf, ok = r.Next(int64(size)) - if !ok { - return errors.New("invalid length") - } - aa[i] = append([]byte(nil), buf...) 
- } - default: - return fmt.Errorf("invalid type for [][]byte %02x", type_) - } - - *a = aa - return nil -} - -type arrayTimestamp []time.Time - -func (a arrayTimestamp) Marshal(wr *buffer.Buffer) error { - const typeSize = 8 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeTimestamp) - - for _, element := range a { - ms := element.UnixNano() / int64(time.Millisecond) - wr.AppendUint64(uint64(ms)) - } - - return nil -} - -func (a *arrayTimestamp) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeTimestamp { - return fmt.Errorf("invalid type for []time.Time %02x", type_) - } - - const typeSize = 8 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]time.Time, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - ms := int64(binary.BigEndian.Uint64(buf[bufIdx:])) - bufIdx += typeSize - aa[i] = time.Unix(ms/1000, (ms%1000)*1000000).UTC() - } - - *a = aa - return nil -} - -type arrayUUID []UUID - -func (a arrayUUID) Marshal(wr *buffer.Buffer) error { - const typeSize = 16 - - writeArrayHeader(wr, len(a), typeSize, TypeCodeUUID) - - for _, element := range a { - wr.Append(element[:]) - } - - return nil -} - -func (a *arrayUUID) Unmarshal(r *buffer.Buffer) error { - length, err := readArrayHeader(r) - if err != nil { - return err - } - - type_, err := readType(r) - if err != nil { - return err - } - if type_ != TypeCodeUUID { - return fmt.Errorf("invalid type for []UUID %#02x", type_) - } - - const typeSize = 16 - buf, ok := r.Next(length * typeSize) - if !ok { - return fmt.Errorf("invalid length %d", length) - } - - aa := (*a)[:0] - if int64(cap(aa)) < length { - aa = make([]UUID, length) - } else { - aa = aa[:length] - } - - var bufIdx int - for i := range aa { - copy(aa[i][:], 
buf[bufIdx:bufIdx+16]) - bufIdx += 16 - } - - *a = aa - return nil -} - -// LIST - -type list []any - -func (l list) Marshal(wr *buffer.Buffer) error { - length := len(l) - - // type - if length == 0 { - wr.AppendByte(byte(TypeCodeList0)) - return nil - } - wr.AppendByte(byte(TypeCodeList32)) - - // size - sizeIdx := wr.Len() - wr.Append([]byte{0, 0, 0, 0}) - - // length - wr.AppendUint32(uint32(length)) - - for _, element := range l { - err := Marshal(wr, element) - if err != nil { - return err - } - } - - // overwrite size - binary.BigEndian.PutUint32(wr.Bytes()[sizeIdx:], uint32(wr.Len()-(sizeIdx+4))) - - return nil -} - -func (l *list) Unmarshal(r *buffer.Buffer) error { - length, err := readListHeader(r) - if err != nil { - return err - } - - // assume that all types are at least 1 byte - if length > int64(r.Len()) { - return fmt.Errorf("invalid length %d", length) - } - - ll := *l - if int64(cap(ll)) < length { - ll = make([]any, length) - } else { - ll = ll[:length] - } - - for i := range ll { - ll[i], err = ReadAny(r) - if err != nil { - return err - } - } - - *l = ll - return nil -} - -// multiSymbol can decode a single symbol or an array. 
-type MultiSymbol []Symbol - -func (ms MultiSymbol) Marshal(wr *buffer.Buffer) error { - return Marshal(wr, []Symbol(ms)) -} - -func (ms *MultiSymbol) Unmarshal(r *buffer.Buffer) error { - type_, err := peekType(r) - if err != nil { - return err - } - - if type_ == TypeCodeSym8 || type_ == TypeCodeSym32 { - s, err := ReadString(r) - if err != nil { - return err - } - - *ms = []Symbol{Symbol(s)} - return nil - } - - return Unmarshal(r, (*[]Symbol)(ms)) -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/frames/frames.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/frames/frames.go deleted file mode 100644 index 86a2bc04f72c..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/frames/frames.go +++ /dev/null @@ -1,1543 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package frames - -import ( - "errors" - "fmt" - "strconv" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" -) - -// Type contains the values for a frame's type. -type Type uint8 - -const ( - TypeAMQP Type = 0x0 - TypeSASL Type = 0x1 -) - -// String implements the fmt.Stringer interface for type Type. -func (t Type) String() string { - if t == 0 { - return "AMQP" - } - return "SASL" -} - -/* - - - - - - - - - - - - - - - - -*/ -type Source struct { - // the address of the source - // - // The address of the source MUST NOT be set when sent on a attach frame sent by - // the receiving link endpoint where the dynamic flag is set to true (that is where - // the receiver is requesting the sender to create an addressable node). 
- // - // The address of the source MUST be set when sent on a attach frame sent by the - // sending link endpoint where the dynamic flag is set to true (that is where the - // sender has created an addressable node at the request of the receiver and is now - // communicating the address of that created node). The generated name of the address - // SHOULD include the link name and the container-id of the remote container to allow - // for ease of identification. - Address string - - // indicates the durability of the terminus - // - // Indicates what state of the terminus will be retained durably: the state of durable - // messages, only existence and configuration of the terminus, or no state at all. - // - // 0: none - // 1: configuration - // 2: unsettled-state - Durable encoding.Durability - - // the expiry policy of the source - // - // link-detach: The expiry timer starts when terminus is detached. - // session-end: The expiry timer starts when the most recently associated session is - // ended. - // connection-close: The expiry timer starts when most recently associated connection - // is closed. - // never: The terminus never expires. - ExpiryPolicy encoding.ExpiryPolicy - - // duration that an expiring source will be retained - // - // The source starts expiring as indicated by the expiry-policy. - Timeout uint32 // seconds - - // request dynamic creation of a remote node - // - // When set to true by the receiving link endpoint, this field constitutes a request - // for the sending peer to dynamically create a node at the source. In this case the - // address field MUST NOT be set. - // - // When set to true by the sending link endpoint this field indicates creation of a - // dynamically created node. In this case the address field will contain the address - // of the created node. 
The generated address SHOULD include the link name and other - // available information on the initiator of the request (such as the remote - // container-id) in some recognizable form for ease of traceability. - Dynamic bool - - // properties of the dynamically created node - // - // If the dynamic field is not set to true this field MUST be left unset. - // - // When set by the receiving link endpoint, this field contains the desired - // properties of the node the receiver wishes to be created. When set by the - // sending link endpoint this field contains the actual properties of the - // dynamically created node. See subsection 3.5.9 for standard node properties. - // http://www.amqp.org/specification/1.0/node-properties - // - // lifetime-policy: The lifetime of a dynamically generated node. - // Definitionally, the lifetime will never be less than the lifetime - // of the link which caused its creation, however it is possible to - // extend the lifetime of dynamically created node using a lifetime - // policy. The value of this entry MUST be of a type which provides - // the lifetime-policy archetype. The following standard - // lifetime-policies are defined below: delete-on-close, - // delete-on-no-links, delete-on-no-messages or - // delete-on-no-links-or-messages. - // supported-dist-modes: The distribution modes that the node supports. - // The value of this entry MUST be one or more symbols which are valid - // distribution-modes. That is, the value MUST be of the same type as - // would be valid in a field defined with the following attributes: - // type="symbol" multiple="true" requires="distribution-mode" - DynamicNodeProperties map[encoding.Symbol]any // TODO: implement custom type with validation - - // the distribution mode of the link - // - // This field MUST be set by the sending end of the link if the endpoint supports more - // than one distribution-mode. 
This field MAY be set by the receiving end of the link - // to indicate a preference when a node supports multiple distribution modes. - DistributionMode encoding.Symbol - - // a set of predicates to filter the messages admitted onto the link - // - // The receiving endpoint sets its desired filter, the sending endpoint sets the filter - // actually in place (including any filters defaulted at the node). The receiving - // endpoint MUST check that the filter in place meets its needs and take responsibility - // for detaching if it does not. - Filter encoding.Filter - - // default outcome for unsettled transfers - // - // Indicates the outcome to be used for transfers that have not reached a terminal - // state at the receiver when the transfer is settled, including when the source - // is destroyed. The value MUST be a valid outcome (e.g., released or rejected). - DefaultOutcome any - - // descriptors for the outcomes that can be chosen on this link - // - // The values in this field are the symbolic descriptors of the outcomes that can - // be chosen on this link. This field MAY be empty, indicating that the default-outcome - // will be assumed for all message transfers (if the default-outcome is not set, and no - // outcomes are provided, then the accepted outcome MUST be supported by the source). - // - // When present, the values MUST be a symbolic descriptor of a valid outcome, - // e.g., "amqp:accepted:list". 
- Outcomes encoding.MultiSymbol - - // the extension capabilities the sender supports/desires - // - // http://www.amqp.org/specification/1.0/source-capabilities - Capabilities encoding.MultiSymbol -} - -func (s *Source) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSource, []encoding.MarshalField{ - {Value: &s.Address, Omit: s.Address == ""}, - {Value: &s.Durable, Omit: s.Durable == encoding.DurabilityNone}, - {Value: &s.ExpiryPolicy, Omit: s.ExpiryPolicy == "" || s.ExpiryPolicy == encoding.ExpirySessionEnd}, - {Value: &s.Timeout, Omit: s.Timeout == 0}, - {Value: &s.Dynamic, Omit: !s.Dynamic}, - {Value: s.DynamicNodeProperties, Omit: len(s.DynamicNodeProperties) == 0}, - {Value: &s.DistributionMode, Omit: s.DistributionMode == ""}, - {Value: s.Filter, Omit: len(s.Filter) == 0}, - {Value: &s.DefaultOutcome, Omit: s.DefaultOutcome == nil}, - {Value: &s.Outcomes, Omit: len(s.Outcomes) == 0}, - {Value: &s.Capabilities, Omit: len(s.Capabilities) == 0}, - }) -} - -func (s *Source) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSource, []encoding.UnmarshalField{ - {Field: &s.Address}, - {Field: &s.Durable}, - {Field: &s.ExpiryPolicy, HandleNull: func() error { s.ExpiryPolicy = encoding.ExpirySessionEnd; return nil }}, - {Field: &s.Timeout}, - {Field: &s.Dynamic}, - {Field: &s.DynamicNodeProperties}, - {Field: &s.DistributionMode}, - {Field: &s.Filter}, - {Field: &s.DefaultOutcome}, - {Field: &s.Outcomes}, - {Field: &s.Capabilities}, - }...) 
-} - -func (s Source) String() string { - return fmt.Sprintf("source{Address: %s, Durable: %d, ExpiryPolicy: %s, Timeout: %d, "+ - "Dynamic: %t, DynamicNodeProperties: %v, DistributionMode: %s, Filter: %v, DefaultOutcome: %v "+ - "Outcomes: %v, Capabilities: %v}", - s.Address, - s.Durable, - s.ExpiryPolicy, - s.Timeout, - s.Dynamic, - s.DynamicNodeProperties, - s.DistributionMode, - s.Filter, - s.DefaultOutcome, - s.Outcomes, - s.Capabilities, - ) -} - -/* - - - - - - - - - - - - -*/ -type Target struct { - // the address of the target - // - // The address of the target MUST NOT be set when sent on a attach frame sent by - // the sending link endpoint where the dynamic flag is set to true (that is where - // the sender is requesting the receiver to create an addressable node). - // - // The address of the source MUST be set when sent on a attach frame sent by the - // receiving link endpoint where the dynamic flag is set to true (that is where - // the receiver has created an addressable node at the request of the sender and - // is now communicating the address of that created node). The generated name of - // the address SHOULD include the link name and the container-id of the remote - // container to allow for ease of identification. - Address string - - // indicates the durability of the terminus - // - // Indicates what state of the terminus will be retained durably: the state of durable - // messages, only existence and configuration of the terminus, or no state at all. - // - // 0: none - // 1: configuration - // 2: unsettled-state - Durable encoding.Durability - - // the expiry policy of the target - // - // link-detach: The expiry timer starts when terminus is detached. - // session-end: The expiry timer starts when the most recently associated session is - // ended. - // connection-close: The expiry timer starts when most recently associated connection - // is closed. - // never: The terminus never expires. 
- ExpiryPolicy encoding.ExpiryPolicy - - // duration that an expiring target will be retained - // - // The target starts expiring as indicated by the expiry-policy. - Timeout uint32 // seconds - - // request dynamic creation of a remote node - // - // When set to true by the sending link endpoint, this field constitutes a request - // for the receiving peer to dynamically create a node at the target. In this case - // the address field MUST NOT be set. - // - // When set to true by the receiving link endpoint this field indicates creation of - // a dynamically created node. In this case the address field will contain the - // address of the created node. The generated address SHOULD include the link name - // and other available information on the initiator of the request (such as the - // remote container-id) in some recognizable form for ease of traceability. - Dynamic bool - - // properties of the dynamically created node - // - // If the dynamic field is not set to true this field MUST be left unset. - // - // When set by the sending link endpoint, this field contains the desired - // properties of the node the sender wishes to be created. When set by the - // receiving link endpoint this field contains the actual properties of the - // dynamically created node. See subsection 3.5.9 for standard node properties. - // http://www.amqp.org/specification/1.0/node-properties - // - // lifetime-policy: The lifetime of a dynamically generated node. - // Definitionally, the lifetime will never be less than the lifetime - // of the link which caused its creation, however it is possible to - // extend the lifetime of dynamically created node using a lifetime - // policy. The value of this entry MUST be of a type which provides - // the lifetime-policy archetype. The following standard - // lifetime-policies are defined below: delete-on-close, - // delete-on-no-links, delete-on-no-messages or - // delete-on-no-links-or-messages. 
- // supported-dist-modes: The distribution modes that the node supports. - // The value of this entry MUST be one or more symbols which are valid - // distribution-modes. That is, the value MUST be of the same type as - // would be valid in a field defined with the following attributes: - // type="symbol" multiple="true" requires="distribution-mode" - DynamicNodeProperties map[encoding.Symbol]any // TODO: implement custom type with validation - - // the extension capabilities the sender supports/desires - // - // http://www.amqp.org/specification/1.0/target-capabilities - Capabilities encoding.MultiSymbol -} - -func (t *Target) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeTarget, []encoding.MarshalField{ - {Value: &t.Address, Omit: t.Address == ""}, - {Value: &t.Durable, Omit: t.Durable == encoding.DurabilityNone}, - {Value: &t.ExpiryPolicy, Omit: t.ExpiryPolicy == "" || t.ExpiryPolicy == encoding.ExpirySessionEnd}, - {Value: &t.Timeout, Omit: t.Timeout == 0}, - {Value: &t.Dynamic, Omit: !t.Dynamic}, - {Value: t.DynamicNodeProperties, Omit: len(t.DynamicNodeProperties) == 0}, - {Value: &t.Capabilities, Omit: len(t.Capabilities) == 0}, - }) -} - -func (t *Target) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeTarget, []encoding.UnmarshalField{ - {Field: &t.Address}, - {Field: &t.Durable}, - {Field: &t.ExpiryPolicy, HandleNull: func() error { t.ExpiryPolicy = encoding.ExpirySessionEnd; return nil }}, - {Field: &t.Timeout}, - {Field: &t.Dynamic}, - {Field: &t.DynamicNodeProperties}, - {Field: &t.Capabilities}, - }...) 
-} - -func (t Target) String() string { - return fmt.Sprintf("source{Address: %s, Durable: %d, ExpiryPolicy: %s, Timeout: %d, "+ - "Dynamic: %t, DynamicNodeProperties: %v, Capabilities: %v}", - t.Address, - t.Durable, - t.ExpiryPolicy, - t.Timeout, - t.Dynamic, - t.DynamicNodeProperties, - t.Capabilities, - ) -} - -// frame is the decoded representation of a frame -type Frame struct { - Type Type // AMQP/SASL - Channel uint16 // channel this frame is for - Body FrameBody // body of the frame -} - -// String implements the fmt.Stringer interface for type Frame. -func (f Frame) String() string { - return fmt.Sprintf("Frame{Type: %s, Channel: %d, Body: %s}", f.Type, f.Channel, f.Body) -} - -// frameBody adds some type safety to frame encoding -type FrameBody interface { - frameBody() -} - -/* - - - - - - - - - - - - - -*/ - -type PerformOpen struct { - ContainerID string // required - Hostname string - MaxFrameSize uint32 // default: 4294967295 - ChannelMax uint16 // default: 65535 - IdleTimeout time.Duration // from milliseconds - OutgoingLocales encoding.MultiSymbol - IncomingLocales encoding.MultiSymbol - OfferedCapabilities encoding.MultiSymbol - DesiredCapabilities encoding.MultiSymbol - Properties map[encoding.Symbol]any -} - -func (o *PerformOpen) frameBody() {} - -func (o *PerformOpen) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeOpen, []encoding.MarshalField{ - {Value: &o.ContainerID, Omit: false}, - {Value: &o.Hostname, Omit: o.Hostname == ""}, - {Value: &o.MaxFrameSize, Omit: o.MaxFrameSize == 4294967295}, - {Value: &o.ChannelMax, Omit: o.ChannelMax == 65535}, - {Value: (*encoding.Milliseconds)(&o.IdleTimeout), Omit: o.IdleTimeout == 0}, - {Value: &o.OutgoingLocales, Omit: len(o.OutgoingLocales) == 0}, - {Value: &o.IncomingLocales, Omit: len(o.IncomingLocales) == 0}, - {Value: &o.OfferedCapabilities, Omit: len(o.OfferedCapabilities) == 0}, - {Value: &o.DesiredCapabilities, Omit: len(o.DesiredCapabilities) == 0}, 
- {Value: o.Properties, Omit: len(o.Properties) == 0}, - }) -} - -func (o *PerformOpen) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeOpen, []encoding.UnmarshalField{ - {Field: &o.ContainerID, HandleNull: func() error { return errors.New("Open.ContainerID is required") }}, - {Field: &o.Hostname}, - {Field: &o.MaxFrameSize, HandleNull: func() error { o.MaxFrameSize = 4294967295; return nil }}, - {Field: &o.ChannelMax, HandleNull: func() error { o.ChannelMax = 65535; return nil }}, - {Field: (*encoding.Milliseconds)(&o.IdleTimeout)}, - {Field: &o.OutgoingLocales}, - {Field: &o.IncomingLocales}, - {Field: &o.OfferedCapabilities}, - {Field: &o.DesiredCapabilities}, - {Field: &o.Properties}, - }...) -} - -func (o *PerformOpen) String() string { - return fmt.Sprintf("Open{ContainerID : %s, Hostname: %s, MaxFrameSize: %d, "+ - "ChannelMax: %d, IdleTimeout: %v, "+ - "OutgoingLocales: %v, IncomingLocales: %v, "+ - "OfferedCapabilities: %v, DesiredCapabilities: %v, "+ - "Properties: %v}", - o.ContainerID, - o.Hostname, - o.MaxFrameSize, - o.ChannelMax, - o.IdleTimeout, - o.OutgoingLocales, - o.IncomingLocales, - o.OfferedCapabilities, - o.DesiredCapabilities, - o.Properties, - ) -} - -/* - - - - - - - - - - - - - -*/ -type PerformBegin struct { - // the remote channel for this session - // If a session is locally initiated, the remote-channel MUST NOT be set. - // When an endpoint responds to a remotely initiated session, the remote-channel - // MUST be set to the channel on which the remote session sent the begin. 
- RemoteChannel *uint16 - - // the transfer-id of the first transfer id the sender will send - NextOutgoingID uint32 // required, sequence number http://www.ietf.org/rfc/rfc1982.txt - - // the initial incoming-window of the sender - IncomingWindow uint32 // required - - // the initial outgoing-window of the sender - OutgoingWindow uint32 // required - - // the maximum handle value that can be used on the session - // The handle-max value is the highest handle value that can be - // used on the session. A peer MUST NOT attempt to attach a link - // using a handle value outside the range that its partner can handle. - // A peer that receives a handle outside the supported range MUST - // close the connection with the framing-error error-code. - HandleMax uint32 // default 4294967295 - - // the extension capabilities the sender supports - // http://www.amqp.org/specification/1.0/session-capabilities - OfferedCapabilities encoding.MultiSymbol - - // the extension capabilities the sender can use if the receiver supports them - // The sender MUST NOT attempt to use any capability other than those it - // has declared in desired-capabilities field. 
- DesiredCapabilities encoding.MultiSymbol - - // session properties - // http://www.amqp.org/specification/1.0/session-properties - Properties map[encoding.Symbol]any -} - -func (b *PerformBegin) frameBody() {} - -func (b *PerformBegin) String() string { - return fmt.Sprintf("Begin{RemoteChannel: %v, NextOutgoingID: %d, IncomingWindow: %d, "+ - "OutgoingWindow: %d, HandleMax: %d, OfferedCapabilities: %v, DesiredCapabilities: %v, "+ - "Properties: %v}", - formatUint16Ptr(b.RemoteChannel), - b.NextOutgoingID, - b.IncomingWindow, - b.OutgoingWindow, - b.HandleMax, - b.OfferedCapabilities, - b.DesiredCapabilities, - b.Properties, - ) -} - -func formatUint16Ptr(p *uint16) string { - if p == nil { - return "" - } - return strconv.FormatUint(uint64(*p), 10) -} - -func (b *PerformBegin) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeBegin, []encoding.MarshalField{ - {Value: b.RemoteChannel, Omit: b.RemoteChannel == nil}, - {Value: &b.NextOutgoingID, Omit: false}, - {Value: &b.IncomingWindow, Omit: false}, - {Value: &b.OutgoingWindow, Omit: false}, - {Value: &b.HandleMax, Omit: b.HandleMax == 4294967295}, - {Value: &b.OfferedCapabilities, Omit: len(b.OfferedCapabilities) == 0}, - {Value: &b.DesiredCapabilities, Omit: len(b.DesiredCapabilities) == 0}, - {Value: b.Properties, Omit: b.Properties == nil}, - }) -} - -func (b *PerformBegin) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeBegin, []encoding.UnmarshalField{ - {Field: &b.RemoteChannel}, - {Field: &b.NextOutgoingID, HandleNull: func() error { return errors.New("Begin.NextOutgoingID is required") }}, - {Field: &b.IncomingWindow, HandleNull: func() error { return errors.New("Begin.IncomingWindow is required") }}, - {Field: &b.OutgoingWindow, HandleNull: func() error { return errors.New("Begin.OutgoingWindow is required") }}, - {Field: &b.HandleMax, HandleNull: func() error { b.HandleMax = 4294967295; return nil }}, - {Field: 
&b.OfferedCapabilities}, - {Field: &b.DesiredCapabilities}, - {Field: &b.Properties}, - }...) -} - -/* - - - - - - - - - - - - - - - - - - - -*/ -type PerformAttach struct { - // the name of the link - // - // This name uniquely identifies the link from the container of the source - // to the container of the target node, e.g., if the container of the source - // node is A, and the container of the target node is B, the link MAY be - // globally identified by the (ordered) tuple (A,B,). - Name string // required - - // the handle for the link while attached - // - // The numeric handle assigned by the the peer as a shorthand to refer to the - // link in all performatives that reference the link until the it is detached. - // - // The handle MUST NOT be used for other open links. An attempt to attach using - // a handle which is already associated with a link MUST be responded to with - // an immediate close carrying a handle-in-use session-error. - // - // To make it easier to monitor AMQP link attach frames, it is RECOMMENDED that - // implementations always assign the lowest available handle to this field. - // - // The two endpoints MAY potentially use different handles to refer to the same link. - // Link handles MAY be reused once a link is closed for both send and receive. - Handle uint32 // required - - // role of the link endpoint - // - // The role being played by the peer, i.e., whether the peer is the sender or the - // receiver of messages on the link. - Role encoding.Role - - // settlement policy for the sender - // - // The delivery settlement policy for the sender. When set at the receiver this - // indicates the desired value for the settlement mode at the sender. When set - // at the sender this indicates the actual settlement mode in use. The sender - // SHOULD respect the receiver's desired settlement mode if the receiver initiates - // the attach exchange and the sender supports the desired mode. 
- // - // 0: unsettled - The sender will send all deliveries initially unsettled to the receiver. - // 1: settled - The sender will send all deliveries settled to the receiver. - // 2: mixed - The sender MAY send a mixture of settled and unsettled deliveries to the receiver. - SenderSettleMode *encoding.SenderSettleMode - - // the settlement policy of the receiver - // - // The delivery settlement policy for the receiver. When set at the sender this - // indicates the desired value for the settlement mode at the receiver. - // When set at the receiver this indicates the actual settlement mode in use. - // The receiver SHOULD respect the sender's desired settlement mode if the sender - // initiates the attach exchange and the receiver supports the desired mode. - // - // 0: first - The receiver will spontaneously settle all incoming transfers. - // 1: second - The receiver will only settle after sending the disposition to - // the sender and receiving a disposition indicating settlement of - // the delivery from the sender. - ReceiverSettleMode *encoding.ReceiverSettleMode - - // the source for messages - // - // If no source is specified on an outgoing link, then there is no source currently - // attached to the link. A link with no source will never produce outgoing messages. - Source *Source - - // the target for messages - // - // If no target is specified on an incoming link, then there is no target currently - // attached to the link. A link with no target will never permit incoming messages. - Target *Target - - // unsettled delivery state - // - // This is used to indicate any unsettled delivery states when a suspended link is - // resumed. The map is keyed by delivery-tag with values indicating the delivery state. - // The local and remote delivery states for a given delivery-tag MUST be compared to - // resolve any in-doubt deliveries. If necessary, deliveries MAY be resent, or resumed - // based on the outcome of this comparison. See subsection 2.6.13. 
- // - // If the local unsettled map is too large to be encoded within a frame of the agreed - // maximum frame size then the session MAY be ended with the frame-size-too-small error. - // The endpoint SHOULD make use of the ability to send an incomplete unsettled map - // (see below) to avoid sending an error. - // - // The unsettled map MUST NOT contain null valued keys. - // - // When reattaching (as opposed to resuming), the unsettled map MUST be null. - Unsettled encoding.Unsettled - - // If set to true this field indicates that the unsettled map provided is not complete. - // When the map is incomplete the recipient of the map cannot take the absence of a - // delivery tag from the map as evidence of settlement. On receipt of an incomplete - // unsettled map a sending endpoint MUST NOT send any new deliveries (i.e. deliveries - // where resume is not set to true) to its partner (and a receiving endpoint which sent - // an incomplete unsettled map MUST detach with an error on receiving a transfer which - // does not have the resume flag set to true). - // - // Note that if this flag is set to true then the endpoints MUST detach and reattach at - // least once in order to send new deliveries. This flag can be useful when there are - // too many entries in the unsettled map to fit within a single frame. An endpoint can - // attach, resume, settle, and detach until enough unsettled state has been cleared for - // an attach where this flag is set to false. - IncompleteUnsettled bool // default: false - - // the sender's initial value for delivery-count - // - // This MUST NOT be null if role is sender, and it is ignored if the role is receiver. - InitialDeliveryCount uint32 // sequence number - - // the maximum message size supported by the link endpoint - // - // This field indicates the maximum message size supported by the link endpoint. - // Any attempt to deliver a message larger than this results in a message-size-exceeded - // link-error. 
If this field is zero or unset, there is no maximum size imposed by the - // link endpoint. - MaxMessageSize uint64 - - // the extension capabilities the sender supports - // http://www.amqp.org/specification/1.0/link-capabilities - OfferedCapabilities encoding.MultiSymbol - - // the extension capabilities the sender can use if the receiver supports them - // - // The sender MUST NOT attempt to use any capability other than those it - // has declared in desired-capabilities field. - DesiredCapabilities encoding.MultiSymbol - - // link properties - // http://www.amqp.org/specification/1.0/link-properties - Properties map[encoding.Symbol]any -} - -func (a *PerformAttach) frameBody() {} - -func (a PerformAttach) String() string { - return fmt.Sprintf("Attach{Name: %s, Handle: %d, Role: %s, SenderSettleMode: %s, ReceiverSettleMode: %s, "+ - "Source: %v, Target: %v, Unsettled: %v, IncompleteUnsettled: %t, InitialDeliveryCount: %d, MaxMessageSize: %d, "+ - "OfferedCapabilities: %v, DesiredCapabilities: %v, Properties: %v}", - a.Name, - a.Handle, - a.Role, - a.SenderSettleMode, - a.ReceiverSettleMode, - a.Source, - a.Target, - a.Unsettled, - a.IncompleteUnsettled, - a.InitialDeliveryCount, - a.MaxMessageSize, - a.OfferedCapabilities, - a.DesiredCapabilities, - a.Properties, - ) -} - -func (a *PerformAttach) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeAttach, []encoding.MarshalField{ - {Value: &a.Name, Omit: false}, - {Value: &a.Handle, Omit: false}, - {Value: &a.Role, Omit: false}, - {Value: a.SenderSettleMode, Omit: a.SenderSettleMode == nil}, - {Value: a.ReceiverSettleMode, Omit: a.ReceiverSettleMode == nil}, - {Value: a.Source, Omit: a.Source == nil}, - {Value: a.Target, Omit: a.Target == nil}, - {Value: a.Unsettled, Omit: len(a.Unsettled) == 0}, - {Value: &a.IncompleteUnsettled, Omit: !a.IncompleteUnsettled}, - {Value: &a.InitialDeliveryCount, Omit: a.Role == encoding.RoleReceiver}, - {Value: &a.MaxMessageSize, Omit: 
a.MaxMessageSize == 0}, - {Value: &a.OfferedCapabilities, Omit: len(a.OfferedCapabilities) == 0}, - {Value: &a.DesiredCapabilities, Omit: len(a.DesiredCapabilities) == 0}, - {Value: a.Properties, Omit: len(a.Properties) == 0}, - }) -} - -func (a *PerformAttach) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeAttach, []encoding.UnmarshalField{ - {Field: &a.Name, HandleNull: func() error { return errors.New("Attach.Name is required") }}, - {Field: &a.Handle, HandleNull: func() error { return errors.New("Attach.Handle is required") }}, - {Field: &a.Role, HandleNull: func() error { return errors.New("Attach.Role is required") }}, - {Field: &a.SenderSettleMode}, - {Field: &a.ReceiverSettleMode}, - {Field: &a.Source}, - {Field: &a.Target}, - {Field: &a.Unsettled}, - {Field: &a.IncompleteUnsettled}, - {Field: &a.InitialDeliveryCount}, - {Field: &a.MaxMessageSize}, - {Field: &a.OfferedCapabilities}, - {Field: &a.DesiredCapabilities}, - {Field: &a.Properties}, - }...) -} - -/* - - - - - - - - - - - - - - - - -*/ -type PerformFlow struct { - // Identifies the expected transfer-id of the next incoming transfer frame. - // This value MUST be set if the peer has received the begin frame for the - // session, and MUST NOT be set if it has not. See subsection 2.5.6 for more details. - NextIncomingID *uint32 // sequence number - - // Defines the maximum number of incoming transfer frames that the endpoint - // can currently receive. See subsection 2.5.6 for more details. - IncomingWindow uint32 // required - - // The transfer-id that will be assigned to the next outgoing transfer frame. - // See subsection 2.5.6 for more details. - NextOutgoingID uint32 // sequence number - - // Defines the maximum number of outgoing transfer frames that the endpoint - // could potentially currently send, if it was not constrained by restrictions - // imposed by its peer's incoming-window. See subsection 2.5.6 for more details. 
- OutgoingWindow uint32 - - // If set, indicates that the flow frame carries flow state information for the local - // link endpoint associated with the given handle. If not set, the flow frame is - // carrying only information pertaining to the session endpoint. - // - // If set to a handle that is not currently associated with an attached link, - // the recipient MUST respond by ending the session with an unattached-handle - // session error. - Handle *uint32 - - // The delivery-count is initialized by the sender when a link endpoint is created, - // and is incremented whenever a message is sent. Only the sender MAY independently - // modify this field. The receiver's value is calculated based on the last known - // value from the sender and any subsequent messages received on the link. Note that, - // despite its name, the delivery-count is not a count but a sequence number - // initialized at an arbitrary point by the sender. - // - // When the handle field is not set, this field MUST NOT be set. - // - // When the handle identifies that the flow state is being sent from the sender link - // endpoint to receiver link endpoint this field MUST be set to the current - // delivery-count of the link endpoint. - // - // When the flow state is being sent from the receiver endpoint to the sender endpoint - // this field MUST be set to the last known value of the corresponding sending endpoint. - // In the event that the receiving link endpoint has not yet seen the initial attach - // frame from the sender this field MUST NOT be set. - DeliveryCount *uint32 // sequence number - - // the current maximum number of messages that can be received - // - // The current maximum number of messages that can be handled at the receiver endpoint - // of the link. Only the receiver endpoint can independently set this value. The sender - // endpoint sets this to the last known value seen from the receiver. - // See subsection 2.6.7 for more details. 
- // - // When the handle field is not set, this field MUST NOT be set. - LinkCredit *uint32 - - // the number of available messages - // - // The number of messages awaiting credit at the link sender endpoint. Only the sender - // can independently set this value. The receiver sets this to the last known value seen - // from the sender. See subsection 2.6.7 for more details. - // - // When the handle field is not set, this field MUST NOT be set. - Available *uint32 - - // indicates drain mode - // - // When flow state is sent from the sender to the receiver, this field contains the - // actual drain mode of the sender. When flow state is sent from the receiver to the - // sender, this field contains the desired drain mode of the receiver. - // See subsection 2.6.7 for more details. - // - // When the handle field is not set, this field MUST NOT be set. - Drain bool - - // request state from partner - // - // If set to true then the receiver SHOULD send its state at the earliest convenient - // opportunity. - // - // If set to true, and the handle field is not set, then the sender only requires - // session endpoint state to be echoed, however, the receiver MAY fulfil this requirement - // by sending a flow performative carrying link-specific state (since any such flow also - // carries session state). - // - // If a sender makes multiple requests for the same state before the receiver can reply, - // the receiver MAY send only one flow in return. - // - // Note that if a peer responds to echo requests with flows which themselves have the - // echo field set to true, an infinite loop could result if its partner adopts the same - // policy (therefore such a policy SHOULD be avoided). 
- Echo bool - - // link state properties - // http://www.amqp.org/specification/1.0/link-state-properties - Properties map[encoding.Symbol]any -} - -func (f *PerformFlow) frameBody() {} - -func (f *PerformFlow) String() string { - return fmt.Sprintf("Flow{NextIncomingID: %s, IncomingWindow: %d, NextOutgoingID: %d, OutgoingWindow: %d, "+ - "Handle: %s, DeliveryCount: %s, LinkCredit: %s, Available: %s, Drain: %t, Echo: %t, Properties: %+v}", - formatUint32Ptr(f.NextIncomingID), - f.IncomingWindow, - f.NextOutgoingID, - f.OutgoingWindow, - formatUint32Ptr(f.Handle), - formatUint32Ptr(f.DeliveryCount), - formatUint32Ptr(f.LinkCredit), - formatUint32Ptr(f.Available), - f.Drain, - f.Echo, - f.Properties, - ) -} - -func formatUint32Ptr(p *uint32) string { - if p == nil { - return "" - } - return strconv.FormatUint(uint64(*p), 10) -} - -func (f *PerformFlow) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeFlow, []encoding.MarshalField{ - {Value: f.NextIncomingID, Omit: f.NextIncomingID == nil}, - {Value: &f.IncomingWindow, Omit: false}, - {Value: &f.NextOutgoingID, Omit: false}, - {Value: &f.OutgoingWindow, Omit: false}, - {Value: f.Handle, Omit: f.Handle == nil}, - {Value: f.DeliveryCount, Omit: f.DeliveryCount == nil}, - {Value: f.LinkCredit, Omit: f.LinkCredit == nil}, - {Value: f.Available, Omit: f.Available == nil}, - {Value: &f.Drain, Omit: !f.Drain}, - {Value: &f.Echo, Omit: !f.Echo}, - {Value: f.Properties, Omit: len(f.Properties) == 0}, - }) -} - -func (f *PerformFlow) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeFlow, []encoding.UnmarshalField{ - {Field: &f.NextIncomingID}, - {Field: &f.IncomingWindow, HandleNull: func() error { return errors.New("Flow.IncomingWindow is required") }}, - {Field: &f.NextOutgoingID, HandleNull: func() error { return errors.New("Flow.NextOutgoingID is required") }}, - {Field: &f.OutgoingWindow, HandleNull: func() error { return 
errors.New("Flow.OutgoingWindow is required") }}, - {Field: &f.Handle}, - {Field: &f.DeliveryCount}, - {Field: &f.LinkCredit}, - {Field: &f.Available}, - {Field: &f.Drain}, - {Field: &f.Echo}, - {Field: &f.Properties}, - }...) -} - -/* - - - - - - - - - - - - - - - - -*/ -type PerformTransfer struct { - // Specifies the link on which the message is transferred. - Handle uint32 // required - - // The delivery-id MUST be supplied on the first transfer of a multi-transfer - // delivery. On continuation transfers the delivery-id MAY be omitted. It is - // an error if the delivery-id on a continuation transfer differs from the - // delivery-id on the first transfer of a delivery. - DeliveryID *uint32 // sequence number - - // Uniquely identifies the delivery attempt for a given message on this link. - // This field MUST be specified for the first transfer of a multi-transfer - // message and can only be omitted for continuation transfers. It is an error - // if the delivery-tag on a continuation transfer differs from the delivery-tag - // on the first transfer of a delivery. - DeliveryTag []byte // up to 32 bytes - - // This field MUST be specified for the first transfer of a multi-transfer message - // and can only be omitted for continuation transfers. It is an error if the - // message-format on a continuation transfer differs from the message-format on - // the first transfer of a delivery. - // - // The upper three octets of a message format code identify a particular message - // format. The lowest octet indicates the version of said message format. Any given - // version of a format is forwards compatible with all higher versions. - MessageFormat *uint32 - - // If not set on the first (or only) transfer for a (multi-transfer) delivery, - // then the settled flag MUST be interpreted as being false. 
For subsequent - // transfers in a multi-transfer delivery if the settled flag is left unset then - // it MUST be interpreted as true if and only if the value of the settled flag on - // any of the preceding transfers was true; if no preceding transfer was sent with - // settled being true then the value when unset MUST be taken as false. - // - // If the negotiated value for snd-settle-mode at attachment is settled, then this - // field MUST be true on at least one transfer frame for a delivery (i.e., the - // delivery MUST be settled at the sender at the point the delivery has been - // completely transferred). - // - // If the negotiated value for snd-settle-mode at attachment is unsettled, then this - // field MUST be false (or unset) on every transfer frame for a delivery (unless the - // delivery is aborted). - Settled bool - - // indicates that the message has more content - // - // Note that if both the more and aborted fields are set to true, the aborted flag - // takes precedence. That is, a receiver SHOULD ignore the value of the more field - // if the transfer is marked as aborted. A sender SHOULD NOT set the more flag to - // true if it also sets the aborted flag to true. - More bool - - // If first, this indicates that the receiver MUST settle the delivery once it has - // arrived without waiting for the sender to settle first. - // - // If second, this indicates that the receiver MUST NOT settle until sending its - // disposition to the sender and receiving a settled disposition from the sender. - // - // If not set, this value is defaulted to the value negotiated on link attach. - // - // If the negotiated link value is first, then it is illegal to set this field - // to second. - // - // If the message is being sent settled by the sender, the value of this field - // is ignored. - // - // The (implicit or explicit) value of this field does not form part of the - // transfer state, and is not retained if a link is suspended and subsequently resumed. 
- // - // 0: first - The receiver will spontaneously settle all incoming transfers. - // 1: second - The receiver will only settle after sending the disposition to - // the sender and receiving a disposition indicating settlement of - // the delivery from the sender. - ReceiverSettleMode *encoding.ReceiverSettleMode - - // the state of the delivery at the sender - // - // When set this informs the receiver of the state of the delivery at the sender. - // This is particularly useful when transfers of unsettled deliveries are resumed - // after resuming a link. Setting the state on the transfer can be thought of as - // being equivalent to sending a disposition immediately before the transfer - // performative, i.e., it is the state of the delivery (not the transfer) that - // existed at the point the frame was sent. - // - // Note that if the transfer performative (or an earlier disposition performative - // referring to the delivery) indicates that the delivery has attained a terminal - // state, then no future transfer or disposition sent by the sender can alter that - // terminal state. - State encoding.DeliveryState - - // indicates a resumed delivery - // - // If true, the resume flag indicates that the transfer is being used to reassociate - // an unsettled delivery from a dissociated link endpoint. See subsection 2.6.13 - // for more details. - // - // The receiver MUST ignore resumed deliveries that are not in its local unsettled map. - // The sender MUST NOT send resumed transfers for deliveries not in its local - // unsettled map. - // - // If a resumed delivery spans more than one transfer performative, then the resume - // flag MUST be set to true on the first transfer of the resumed delivery. For - // subsequent transfers for the same delivery the resume flag MAY be set to true, - // or MAY be omitted. 
- // - // In the case where the exchange of unsettled maps makes clear that all message - // data has been successfully transferred to the receiver, and that only the final - // state (and potentially settlement) at the sender needs to be conveyed, then a - // resumed delivery MAY carry no payload and instead act solely as a vehicle for - // carrying the terminal state of the delivery at the sender. - Resume bool - - // indicates that the message is aborted - // - // Aborted messages SHOULD be discarded by the recipient (any payload within the - // frame carrying the performative MUST be ignored). An aborted message is - // implicitly settled. - Aborted bool - - // batchable hint - // - // If true, then the issuer is hinting that there is no need for the peer to urgently - // communicate updated delivery state. This hint MAY be used to artificially increase - // the amount of batching an implementation uses when communicating delivery states, - // and thereby save bandwidth. - // - // If the message being delivered is too large to fit within a single frame, then the - // setting of batchable to true on any of the transfer performatives for the delivery - // is equivalent to setting batchable to true for all the transfer performatives for - // the delivery. - // - // The batchable value does not form part of the transfer state, and is not retained - // if a link is suspended and subsequently resumed. - Batchable bool - - Payload []byte - - // optional channel to indicate to sender that transfer has completed - // - // Settled=true: closed when the transferred on network. - // Settled=false: closed when the receiver has confirmed settlement. 
- Done chan encoding.DeliveryState -} - -func (t *PerformTransfer) frameBody() {} - -func (t PerformTransfer) String() string { - deliveryTag := "" - if t.DeliveryTag != nil { - deliveryTag = fmt.Sprintf("%X", t.DeliveryTag) - } - - return fmt.Sprintf("Transfer{Handle: %d, DeliveryID: %s, DeliveryTag: %s, MessageFormat: %s, "+ - "Settled: %t, More: %t, ReceiverSettleMode: %s, State: %v, Resume: %t, Aborted: %t, "+ - "Batchable: %t, Payload [size]: %d}", - t.Handle, - formatUint32Ptr(t.DeliveryID), - deliveryTag, - formatUint32Ptr(t.MessageFormat), - t.Settled, - t.More, - t.ReceiverSettleMode, - t.State, - t.Resume, - t.Aborted, - t.Batchable, - len(t.Payload), - ) -} - -func (t *PerformTransfer) Marshal(wr *buffer.Buffer) error { - err := encoding.MarshalComposite(wr, encoding.TypeCodeTransfer, []encoding.MarshalField{ - {Value: &t.Handle}, - {Value: t.DeliveryID, Omit: t.DeliveryID == nil}, - {Value: &t.DeliveryTag, Omit: len(t.DeliveryTag) == 0}, - {Value: t.MessageFormat, Omit: t.MessageFormat == nil}, - {Value: &t.Settled, Omit: !t.Settled}, - {Value: &t.More, Omit: !t.More}, - {Value: t.ReceiverSettleMode, Omit: t.ReceiverSettleMode == nil}, - {Value: t.State, Omit: t.State == nil}, - {Value: &t.Resume, Omit: !t.Resume}, - {Value: &t.Aborted, Omit: !t.Aborted}, - {Value: &t.Batchable, Omit: !t.Batchable}, - }) - if err != nil { - return err - } - - wr.Append(t.Payload) - return nil -} - -func (t *PerformTransfer) Unmarshal(r *buffer.Buffer) error { - err := encoding.UnmarshalComposite(r, encoding.TypeCodeTransfer, []encoding.UnmarshalField{ - {Field: &t.Handle, HandleNull: func() error { return errors.New("Transfer.Handle is required") }}, - {Field: &t.DeliveryID}, - {Field: &t.DeliveryTag}, - {Field: &t.MessageFormat}, - {Field: &t.Settled}, - {Field: &t.More}, - {Field: &t.ReceiverSettleMode}, - {Field: &t.State}, - {Field: &t.Resume}, - {Field: &t.Aborted}, - {Field: &t.Batchable}, - }...) 
- if err != nil { - return err - } - - t.Payload = append([]byte(nil), r.Bytes()...) - - return err -} - -/* - - - - - - - - - - - -*/ -type PerformDisposition struct { - // directionality of disposition - // - // The role identifies whether the disposition frame contains information about - // sending link endpoints or receiving link endpoints. - Role encoding.Role - - // lower bound of deliveries - // - // Identifies the lower bound of delivery-ids for the deliveries in this set. - First uint32 // required, sequence number - - // upper bound of deliveries - // - // Identifies the upper bound of delivery-ids for the deliveries in this set. - // If not set, this is taken to be the same as first. - Last *uint32 // sequence number - - // indicates deliveries are settled - // - // If true, indicates that the referenced deliveries are considered settled by - // the issuing endpoint. - Settled bool - - // indicates state of deliveries - // - // Communicates the state of all the deliveries referenced by this disposition. - State encoding.DeliveryState - - // batchable hint - // - // If true, then the issuer is hinting that there is no need for the peer to - // urgently communicate the impact of the updated delivery states. This hint - // MAY be used to artificially increase the amount of batching an implementation - // uses when communicating delivery states, and thereby save bandwidth. 
- Batchable bool -} - -func (d *PerformDisposition) frameBody() {} - -func (d PerformDisposition) String() string { - return fmt.Sprintf("Disposition{Role: %s, First: %d, Last: %s, Settled: %t, State: %v, Batchable: %t}", - d.Role, - d.First, - formatUint32Ptr(d.Last), - d.Settled, - d.State, - d.Batchable, - ) -} - -func (d *PerformDisposition) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeDisposition, []encoding.MarshalField{ - {Value: &d.Role, Omit: false}, - {Value: &d.First, Omit: false}, - {Value: d.Last, Omit: d.Last == nil}, - {Value: &d.Settled, Omit: !d.Settled}, - {Value: d.State, Omit: d.State == nil}, - {Value: &d.Batchable, Omit: !d.Batchable}, - }) -} - -func (d *PerformDisposition) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeDisposition, []encoding.UnmarshalField{ - {Field: &d.Role, HandleNull: func() error { return errors.New("Disposition.Role is required") }}, - {Field: &d.First, HandleNull: func() error { return errors.New("Disposition.Handle is required") }}, - {Field: &d.Last}, - {Field: &d.Settled}, - {Field: &d.State}, - {Field: &d.Batchable}, - }...) -} - -/* - - - - - - - - -*/ -type PerformDetach struct { - // the local handle of the link to be detached - Handle uint32 //required - - // if true then the sender has closed the link - Closed bool - - // error causing the detach - // - // If set, this field indicates that the link is being detached due to an error - // condition. The value of the field SHOULD contain details on the cause of the error. 
- Error *encoding.Error -} - -func (d *PerformDetach) frameBody() {} - -func (d PerformDetach) String() string { - return fmt.Sprintf("Detach{Handle: %d, Closed: %t, Error: %v}", - d.Handle, - d.Closed, - d.Error, - ) -} - -func (d *PerformDetach) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeDetach, []encoding.MarshalField{ - {Value: &d.Handle, Omit: false}, - {Value: &d.Closed, Omit: !d.Closed}, - {Value: d.Error, Omit: d.Error == nil}, - }) -} - -func (d *PerformDetach) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeDetach, []encoding.UnmarshalField{ - {Field: &d.Handle, HandleNull: func() error { return errors.New("Detach.Handle is required") }}, - {Field: &d.Closed}, - {Field: &d.Error}, - }...) -} - -/* - - - - - - -*/ -type PerformEnd struct { - // error causing the end - // - // If set, this field indicates that the session is being ended due to an error - // condition. The value of the field SHOULD contain details on the cause of the error. - Error *encoding.Error -} - -func (e *PerformEnd) frameBody() {} - -func (d PerformEnd) String() string { - return fmt.Sprintf("End{Error: %v}", d.Error) -} - -func (e *PerformEnd) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeEnd, []encoding.MarshalField{ - {Value: e.Error, Omit: e.Error == nil}, - }) -} - -func (e *PerformEnd) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeEnd, - encoding.UnmarshalField{Field: &e.Error}, - ) -} - -/* - - - - - - -*/ -type PerformClose struct { - // error causing the close - // - // If set, this field indicates that the session is being closed due to an error - // condition. The value of the field SHOULD contain details on the cause of the error. 
- Error *encoding.Error -} - -func (c *PerformClose) frameBody() {} - -func (c *PerformClose) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeClose, []encoding.MarshalField{ - {Value: c.Error, Omit: c.Error == nil}, - }) -} - -func (c *PerformClose) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeClose, - encoding.UnmarshalField{Field: &c.Error}, - ) -} - -func (c *PerformClose) String() string { - return fmt.Sprintf("Close{Error: %s}", c.Error) -} - -/* - - - - - - -*/ - -type SASLInit struct { - Mechanism encoding.Symbol - InitialResponse []byte - Hostname string -} - -func (si *SASLInit) frameBody() {} - -func (si *SASLInit) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLInit, []encoding.MarshalField{ - {Value: &si.Mechanism, Omit: false}, - {Value: &si.InitialResponse, Omit: false}, - {Value: &si.Hostname, Omit: len(si.Hostname) == 0}, - }) -} - -func (si *SASLInit) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLInit, []encoding.UnmarshalField{ - {Field: &si.Mechanism, HandleNull: func() error { return errors.New("saslInit.Mechanism is required") }}, - {Field: &si.InitialResponse}, - {Field: &si.Hostname}, - }...) -} - -func (si *SASLInit) String() string { - // Elide the InitialResponse as it may contain a plain text secret. 
- return fmt.Sprintf("SaslInit{Mechanism : %s, InitialResponse: ********, Hostname: %s}", - si.Mechanism, - si.Hostname, - ) -} - -/* - - - - -*/ - -type SASLMechanisms struct { - Mechanisms encoding.MultiSymbol -} - -func (sm *SASLMechanisms) frameBody() {} - -func (sm *SASLMechanisms) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLMechanism, []encoding.MarshalField{ - {Value: &sm.Mechanisms, Omit: false}, - }) -} - -func (sm *SASLMechanisms) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLMechanism, - encoding.UnmarshalField{Field: &sm.Mechanisms, HandleNull: func() error { return errors.New("saslMechanisms.Mechanisms is required") }}, - ) -} - -func (sm *SASLMechanisms) String() string { - return fmt.Sprintf("SaslMechanisms{Mechanisms : %v}", - sm.Mechanisms, - ) -} - -/* - - - - -*/ - -type SASLChallenge struct { - Challenge []byte -} - -func (sc *SASLChallenge) String() string { - return "Challenge{Challenge: ********}" -} - -func (sc *SASLChallenge) frameBody() {} - -func (sc *SASLChallenge) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLChallenge, []encoding.MarshalField{ - {Value: &sc.Challenge, Omit: false}, - }) -} - -func (sc *SASLChallenge) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLChallenge, []encoding.UnmarshalField{ - {Field: &sc.Challenge, HandleNull: func() error { return errors.New("saslChallenge.Challenge is required") }}, - }...) 
-} - -/* - - - - -*/ - -type SASLResponse struct { - Response []byte -} - -func (sr *SASLResponse) String() string { - return "Response{Response: ********}" -} - -func (sr *SASLResponse) frameBody() {} - -func (sr *SASLResponse) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLResponse, []encoding.MarshalField{ - {Value: &sr.Response, Omit: false}, - }) -} - -func (sr *SASLResponse) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLResponse, []encoding.UnmarshalField{ - {Field: &sr.Response, HandleNull: func() error { return errors.New("saslResponse.Response is required") }}, - }...) -} - -/* - - - - - -*/ - -type SASLOutcome struct { - Code encoding.SASLCode - AdditionalData []byte -} - -func (so *SASLOutcome) frameBody() {} - -func (so *SASLOutcome) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeSASLOutcome, []encoding.MarshalField{ - {Value: &so.Code, Omit: false}, - {Value: &so.AdditionalData, Omit: len(so.AdditionalData) == 0}, - }) -} - -func (so *SASLOutcome) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeSASLOutcome, []encoding.UnmarshalField{ - {Field: &so.Code, HandleNull: func() error { return errors.New("saslOutcome.AdditionalData is required") }}, - {Field: &so.AdditionalData}, - }...) 
-} - -func (so *SASLOutcome) String() string { - return fmt.Sprintf("SaslOutcome{Code : %v, AdditionalData: %v}", - so.Code, - so.AdditionalData, - ) -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/frames/parsing.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/frames/parsing.go deleted file mode 100644 index 2b0b34837774..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/frames/parsing.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package frames - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" -) - -const HeaderSize = 8 - -// Frame structure: -// -// header (8 bytes) -// 0-3: SIZE (total size, at least 8 bytes for header, uint32) -// 4: DOFF (data offset,at least 2, count of 4 bytes words, uint8) -// 5: TYPE (frame type) -// 0x0: AMQP -// 0x1: SASL -// 6-7: type dependent (channel for AMQP) -// extended header (opt) -// body (opt) - -// Header in a structure appropriate for use with binary.Read() -type Header struct { - // size: an unsigned 32-bit integer that MUST contain the total frame size of the frame header, - // extended header, and frame body. The frame is malformed if the size is less than the size of - // the frame header (8 bytes). - Size uint32 - // doff: gives the position of the body within the frame. The value of the data offset is an - // unsigned, 8-bit integer specifying a count of 4-byte words. Due to the mandatory 8-byte - // frame header, the frame is malformed if the value is less than 2. - DataOffset uint8 - FrameType uint8 - Channel uint16 -} - -// ParseHeader reads the header from r and returns the result. -// -// No validation is done. 
-func ParseHeader(r *buffer.Buffer) (Header, error) { - buf, ok := r.Next(8) - if !ok { - return Header{}, errors.New("invalid frameHeader") - } - _ = buf[7] - - fh := Header{ - Size: binary.BigEndian.Uint32(buf[0:4]), - DataOffset: buf[4], - FrameType: buf[5], - Channel: binary.BigEndian.Uint16(buf[6:8]), - } - - if fh.Size < HeaderSize { - return fh, fmt.Errorf("received frame header with invalid size %d", fh.Size) - } - - if fh.DataOffset < 2 { - return fh, fmt.Errorf("received frame header with invalid data offset %d", fh.DataOffset) - } - - return fh, nil -} - -// ParseBody reads and unmarshals an AMQP frame. -func ParseBody(r *buffer.Buffer) (FrameBody, error) { - payload := r.Bytes() - - if r.Len() < 3 || payload[0] != 0 || encoding.AMQPType(payload[1]) != encoding.TypeCodeSmallUlong { - return nil, errors.New("invalid frame body header") - } - - switch pType := encoding.AMQPType(payload[2]); pType { - case encoding.TypeCodeOpen: - t := new(PerformOpen) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeBegin: - t := new(PerformBegin) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeAttach: - t := new(PerformAttach) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeFlow: - t := new(PerformFlow) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeTransfer: - t := new(PerformTransfer) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeDisposition: - t := new(PerformDisposition) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeDetach: - t := new(PerformDetach) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeEnd: - t := new(PerformEnd) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeClose: - t := new(PerformClose) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeSASLMechanism: - t := new(SASLMechanisms) - err := t.Unmarshal(r) - return t, err - case encoding.TypeCodeSASLChallenge: - t := new(SASLChallenge) - err := t.Unmarshal(r) - 
return t, err - case encoding.TypeCodeSASLOutcome: - t := new(SASLOutcome) - err := t.Unmarshal(r) - return t, err - default: - return nil, fmt.Errorf("unknown performative type %02x", pType) - } -} - -// Write encodes fr into buf. -// split out from conn.WriteFrame for testing purposes. -func Write(buf *buffer.Buffer, fr Frame) error { - // write header - buf.Append([]byte{ - 0, 0, 0, 0, // size, overwrite later - 2, // doff, see frameHeader.DataOffset comment - uint8(fr.Type), // frame type - }) - buf.AppendUint16(fr.Channel) // channel - - // write AMQP frame body - err := encoding.Marshal(buf, fr.Body) - if err != nil { - return err - } - - // validate size - if uint(buf.Len()) > math.MaxUint32 { - return errors.New("frame too large") - } - - // retrieve raw bytes - bufBytes := buf.Bytes() - - // write correct size - binary.BigEndian.PutUint32(bufBytes, uint32(len(bufBytes))) - return nil -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/queue/queue.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/queue/queue.go deleted file mode 100644 index 45d6f5af9daf..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/queue/queue.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (c) Microsoft Corporation - -package queue - -import ( - "container/ring" -) - -// Holder provides synchronized access to a *Queue[T]. -type Holder[T any] struct { - // these channels work in tandem to provide exclusive access to the underlying *Queue[T]. - // each channel is created with a buffer size of one. - // empty behaves like a mutex when there's one or more messages in the queue. - // populated is like a semaphore when the queue is empty. - // the *Queue[T] is only ever in one channel. which channel depends on if it contains any items. - // the initial state is for empty to contain an empty queue. - empty chan *Queue[T] - populated chan *Queue[T] -} - -// NewHolder creates a new Holder[T] that contains the provided *Queue[T]. 
-func NewHolder[T any](q *Queue[T]) *Holder[T] { - h := &Holder[T]{ - empty: make(chan *Queue[T], 1), - populated: make(chan *Queue[T], 1), - } - h.Release(q) - return h -} - -// Acquire attempts to acquire the *Queue[T]. If the *Queue[T] has already been acquired the call blocks. -// When the *Queue[T] is no longer required, you MUST call Release() to relinquish acquisition. -func (h *Holder[T]) Acquire() *Queue[T] { - // the queue will be in only one of the channels, it doesn't matter which one - var q *Queue[T] - select { - case q = <-h.empty: - // empty queue - case q = <-h.populated: - // populated queue - } - return q -} - -// Wait returns a channel that's signaled when the *Queue[T] contains at least one item. -// When the *Queue[T] is no longer required, you MUST call Release() to relinquish acquisition. -func (h *Holder[T]) Wait() <-chan *Queue[T] { - return h.populated -} - -// Release returns the *Queue[T] back to the Holder[T]. -// Once the *Queue[T] has been released, it is no longer safe to call its methods. -func (h *Holder[T]) Release(q *Queue[T]) { - if q.Len() == 0 { - h.empty <- q - } else { - h.populated <- q - } -} - -// Len returns the length of the *Queue[T]. -func (h *Holder[T]) Len() int { - msgLen := 0 - select { - case q := <-h.empty: - h.empty <- q - case q := <-h.populated: - msgLen = q.Len() - h.populated <- q - } - return msgLen -} - -// Queue[T] is a segmented FIFO queue of Ts. -type Queue[T any] struct { - head *ring.Ring - tail *ring.Ring - size int -} - -// New creates a new instance of Queue[T]. -// - size is the size of each Queue segment -func New[T any](size int) *Queue[T] { - r := &ring.Ring{ - Value: &segment[T]{ - items: make([]*T, size), - }, - } - return &Queue[T]{ - head: r, - tail: r, - } -} - -// Enqueue adds the specified item to the end of the queue. -// If the current segment is full, a new segment is created. 
-func (q *Queue[T]) Enqueue(item T) { - for { - r := q.tail - seg := r.Value.(*segment[T]) - - if seg.tail < len(seg.items) { - seg.items[seg.tail] = &item - seg.tail++ - q.size++ - return - } - - // segment is full, can we advance? - if next := r.Next(); next != q.head { - q.tail = next - continue - } - - // no, add a new ring - r.Link(&ring.Ring{ - Value: &segment[T]{ - items: make([]*T, len(seg.items)), - }, - }) - - q.tail = r.Next() - } -} - -// Dequeue removes and returns the item from the front of the queue. -func (q *Queue[T]) Dequeue() *T { - r := q.head - seg := r.Value.(*segment[T]) - - if seg.tail == 0 { - // queue is empty - return nil - } - - // remove first item - item := seg.items[seg.head] - seg.items[seg.head] = nil - seg.head++ - q.size-- - - if seg.head == seg.tail { - // segment is now empty, reset indices - seg.head, seg.tail = 0, 0 - - // if we're not at the last ring, advance head to the next one - if q.head != q.tail { - q.head = r.Next() - } - } - - return item -} - -// Len returns the total count of enqueued items. -func (q *Queue[T]) Len() int { - return q.size -} - -type segment[T any] struct { - items []*T - head int - tail int -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/internal/shared/shared.go b/sdk/messaging/azservicebus/internal/go-amqp/internal/shared/shared.go deleted file mode 100644 index 867c1e932bf5..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/internal/shared/shared.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) Microsoft Corporation - -package shared - -import ( - "encoding/base64" - "math/rand" - "sync" - "time" -) - -// lockedRand provides a rand source that is safe for concurrent use. -type lockedRand struct { - mu sync.Mutex - src *rand.Rand -} - -func (r *lockedRand) Read(p []byte) (int, error) { - r.mu.Lock() - defer r.mu.Unlock() - return r.src.Read(p) -} - -// package scoped rand source to avoid any issues with seeding -// of the global source. 
-var pkgRand = &lockedRand{ - src: rand.New(rand.NewSource(time.Now().UnixNano())), -} - -// RandString returns a base64 encoded string of n bytes. -func RandString(n int) string { - b := make([]byte, n) - // from math/rand, cannot fail - _, _ = pkgRand.Read(b) - return base64.RawURLEncoding.EncodeToString(b) -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/link.go b/sdk/messaging/azservicebus/internal/go-amqp/link.go deleted file mode 100644 index 1965b75049ed..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/link.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/frames" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/queue" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/shared" -) - -// linkKey uniquely identifies a link on a connection by name and direction. -// -// A link can be identified uniquely by the ordered tuple -// -// (source-container-id, target-container-id, name) -// -// On a single connection the container ID pairs can be abbreviated -// to a boolean flag indicating the direction of the link. 
-type linkKey struct { - name string - role encoding.Role // Local role: sender/receiver -} - -// link contains the common state and methods for sending and receiving links -type link struct { - key linkKey // Name and direction - handle uint32 // our handle - remoteHandle uint32 // remote's handle - dynamicAddr bool // request a dynamic link address from the server - - // frames destined for this link are added to this queue by Session.muxFrameToLink - rxQ *queue.Holder[frames.FrameBody] - - // used for gracefully closing link - close chan struct{} // signals a link's mux to shut down; DO NOT use this to check if a link has terminated, use done instead - closeOnce *sync.Once // closeOnce protects close from being closed multiple times - - done chan struct{} // closed when the link has terminated (mux exited); DO NOT wait on this from within a link's mux() as it will never trigger! - doneErr error // contains the mux error state; ONLY written to by the mux and MUST only be read from after done is closed! - closeErr error // contains the error state returned from closeLink(); ONLY closeLink() reads/writes this! - - session *Session // parent session - source *frames.Source // used for Receiver links - target *frames.Target // used for Sender links - properties map[encoding.Symbol]any // additional properties sent upon link attach - - // "The delivery-count is initialized by the sender when a link endpoint is created, - // and is incremented whenever a message is sent. Only the sender MAY independently - // modify this field. The receiver's value is calculated based on the last known - // value from the sender and any subsequent messages received on the link. Note that, - // despite its name, the delivery-count is not a count but a sequence number - // initialized at an arbitrary point by the sender." - deliveryCount uint32 - - // The current maximum number of messages that can be handled at the receiver endpoint of the link. 
Only the receiver endpoint - // can independently set this value. The sender endpoint sets this to the last known value seen from the receiver. - linkCredit uint32 - - senderSettleMode *SenderSettleMode - receiverSettleMode *ReceiverSettleMode - maxMessageSize uint64 - - closeInProgress bool // indicates that the detach performative has been sent -} - -func newLink(s *Session, r encoding.Role) link { - l := link{ - key: linkKey{shared.RandString(40), r}, - session: s, - close: make(chan struct{}), - closeOnce: &sync.Once{}, - done: make(chan struct{}), - } - - // set the segment size relative to respective window - var segmentSize int - if r == encoding.RoleReceiver { - segmentSize = int(s.incomingWindow) - } else { - segmentSize = int(s.outgoingWindow) - } - - l.rxQ = queue.NewHolder(queue.New[frames.FrameBody](segmentSize)) - return l -} - -// waitForFrame waits for an incoming frame to be queued. -// it returns the next frame from the queue, or an error. -// the error is either from the context or session.doneErr. -// not meant for consumption outside of link.go. -func (l *link) waitForFrame(ctx context.Context) (frames.FrameBody, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-l.session.done: - // session has terminated, no need to deallocate in this case - return nil, l.session.doneErr - case q := <-l.rxQ.Wait(): - // frame received - fr := q.Dequeue() - l.rxQ.Release(q) - return *fr, nil - } -} - -// attach sends the Attach performative to establish the link with its parent session. -// this is automatically called by the new*Link constructors. 
-func (l *link) attach(ctx context.Context, beforeAttach func(*frames.PerformAttach), afterAttach func(*frames.PerformAttach)) error { - if err := l.session.freeAbandonedLinks(ctx); err != nil { - return err - } - - // once the abandoned links have been cleaned up we can create our link - if err := l.session.allocateHandle(ctx, l); err != nil { - return err - } - - attach := &frames.PerformAttach{ - Name: l.key.name, - Handle: l.handle, - ReceiverSettleMode: l.receiverSettleMode, - SenderSettleMode: l.senderSettleMode, - MaxMessageSize: l.maxMessageSize, - Source: l.source, - Target: l.target, - Properties: l.properties, - } - - // link-specific configuration of the attach frame - beforeAttach(attach) - - if err := l.txFrameAndWait(ctx, attach); err != nil { - return err - } - - // wait for response - fr, err := l.waitForFrame(ctx) - if err != nil { - l.session.abandonLink(l) - return err - } - - resp, ok := fr.(*frames.PerformAttach) - if !ok { - debug.Log(1, "RX (link %p): unexpected attach response frame %T", l, fr) - if err := l.session.conn.Close(); err != nil { - return err - } - return &ConnError{inner: fmt.Errorf("unexpected attach response: %#v", fr)} - } - - // If the remote encounters an error during the attach it returns an Attach - // with no Source or Target. The remote then sends a Detach with an error. - // - // Note that if the application chooses not to create a terminus, the session - // endpoint will still create a link endpoint and issue an attach indicating - // that the link endpoint has no associated local terminus. In this case, the - // session endpoint MUST immediately detach the newly created link endpoint. - // - // http://docs.oasis-open.org/amqp/core/v1.0/csprd01/amqp-core-transport-v1.0-csprd01.html#doc-idp386144 - if resp.Source == nil && resp.Target == nil { - // wait for detach - fr, err := l.waitForFrame(ctx) - if err != nil { - // we timed out waiting for the peer to close the link, this really isn't an abandoned link. 
- // however, we still need to send the detach performative to ack the peer. - l.session.abandonLink(l) - return err - } - - detach, ok := fr.(*frames.PerformDetach) - if !ok { - if err := l.session.conn.Close(); err != nil { - return err - } - return &ConnError{inner: fmt.Errorf("unexpected frame while waiting for detach: %#v", fr)} - } - - // send return detach - fr = &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - } - if err := l.txFrameAndWait(ctx, fr); err != nil { - return err - } - - if detach.Error == nil { - return fmt.Errorf("received detach with no error specified") - } - return detach.Error - } - - if l.maxMessageSize == 0 || resp.MaxMessageSize < l.maxMessageSize { - l.maxMessageSize = resp.MaxMessageSize - } - - // link-specific configuration post attach - afterAttach(resp) - - if err := l.setSettleModes(resp); err != nil { - // close the link as there's a mismatch on requested/supported settlement modes - dr := &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - } - if err := l.txFrameAndWait(ctx, dr); err != nil { - return err - } - return err - } - - return nil -} - -// setSettleModes sets the settlement modes based on the resp frames.PerformAttach. -// -// If a settlement mode has been explicitly set locally and it was not honored by the -// server an error is returned. 
-func (l *link) setSettleModes(resp *frames.PerformAttach) error { - var ( - localRecvSettle = receiverSettleModeValue(l.receiverSettleMode) - respRecvSettle = receiverSettleModeValue(resp.ReceiverSettleMode) - ) - if l.receiverSettleMode != nil && localRecvSettle != respRecvSettle { - return fmt.Errorf("amqp: receiver settlement mode %q requested, received %q from server", l.receiverSettleMode, &respRecvSettle) - } - l.receiverSettleMode = &respRecvSettle - - var ( - localSendSettle = senderSettleModeValue(l.senderSettleMode) - respSendSettle = senderSettleModeValue(resp.SenderSettleMode) - ) - if l.senderSettleMode != nil && localSendSettle != respSendSettle { - return fmt.Errorf("amqp: sender settlement mode %q requested, received %q from server", l.senderSettleMode, &respSendSettle) - } - l.senderSettleMode = &respSendSettle - - return nil -} - -// muxHandleFrame processes fr based on type. -func (l *link) muxHandleFrame(fr frames.FrameBody) error { - switch fr := fr.(type) { - case *frames.PerformDetach: - if !fr.Closed { - l.closeWithError(ErrCondNotImplemented, fmt.Sprintf("non-closing detach not supported: %+v", fr)) - return nil - } - - // there are two possibilities: - // - this is the ack to a client-side Close() - // - the peer is closing the link so we must ack - - if l.closeInProgress { - // if the client-side close was initiated due to an error (l.closeWithError) - // then l.doneErr will already be set. in this case, return that error instead - // of an empty LinkError which indicates a clean client-side close. 
- if l.doneErr != nil { - return l.doneErr - } - return &LinkError{} - } - - dr := &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - } - l.txFrame(context.Background(), dr, nil) - return &LinkError{RemoteErr: fr.Error} - - default: - debug.Log(1, "RX (link %p): unexpected frame: %s", l, fr) - l.closeWithError(ErrCondInternalError, fmt.Sprintf("link received unexpected frame %T", fr)) - return nil - } -} - -// Close closes the Sender and AMQP link. -func (l *link) closeLink(ctx context.Context) error { - var ctxErr error - l.closeOnce.Do(func() { - close(l.close) - - // once the mux has received the ack'ing detach performative, the mux will - // exit which deletes the link and closes l.done. - select { - case <-l.done: - l.closeErr = l.doneErr - case <-ctx.Done(): - // notify the caller that the close timed out/was cancelled. - // the mux will remain running and once the ack is received it will terminate. - ctxErr = ctx.Err() - - // record that the close timed out/was cancelled. - // subsequent calls to closeLink() will return this - debug.Log(1, "TX (link %p) closing %s: %v", l, l.key.name, ctxErr) - l.closeErr = &LinkError{inner: ctxErr} - } - }) - - if ctxErr != nil { - return ctxErr - } - - var linkErr *LinkError - if errors.As(l.closeErr, &linkErr) && linkErr.RemoteErr == nil && linkErr.inner == nil { - // an empty LinkError means the link was cleanly closed by the caller - return nil - } - return l.closeErr -} - -// closeWithError initiates closing the link with the specified AMQP error. -// the mux must continue to run until the ack'ing detach is received. 
-// l.doneErr is populated with a &LinkError{} containing an inner error constructed from the specified values -// - cnd is the AMQP error condition -// - desc is the error description -func (l *link) closeWithError(cnd ErrCond, desc string) { - amqpErr := &Error{Condition: cnd, Description: desc} - if l.closeInProgress { - debug.Log(3, "TX (link %p) close error already pending, discarding %v", l, amqpErr) - return - } - - dr := &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - Error: amqpErr, - } - l.closeInProgress = true - l.doneErr = &LinkError{inner: fmt.Errorf("%s: %s", cnd, desc)} - l.txFrame(context.Background(), dr, nil) -} - -// txFrame sends the specified frame via the link's session. -// you MUST call this instead of session.txFrame() to ensure -// that frames are not sent during session shutdown. -func (l *link) txFrame(ctx context.Context, fr frames.FrameBody, sent chan error) { - // NOTE: there is no need to select on l.done as this is either - // called from a link's mux or before the mux has even started. - select { - case <-l.session.done: - if sent != nil { - sent <- l.session.doneErr - } - case <-l.session.endSent: - // we swallow this to prevent the link's mux from terminating. - // l.session.done will soon close so this is temporary. - return - case l.session.tx <- frameBodyEnvelope{Ctx: ctx, FrameBody: fr, Sent: sent}: - debug.Log(2, "TX (link %p): mux frame to Session (%p): %s", l, l.session, fr) - } -} - -// txFrame sends the specified frame via the link's session. -// you MUST call this instead of session.txFrame() to ensure -// that frames are not sent during session shutdown. -func (l *link) txFrameAndWait(ctx context.Context, fr frames.FrameBody) error { - // NOTE: there is no need to select on l.done as this is either - // called from a link's mux or before the mux has even started. 
- sent := make(chan error, 1) - select { - case <-l.session.done: - return l.session.doneErr - case <-l.session.endSent: - // we swallow this to prevent the link's mux from terminating. - // l.session.done will soon close so this is temporary. - return nil - case l.session.tx <- frameBodyEnvelope{Ctx: ctx, FrameBody: fr, Sent: sent}: - debug.Log(2, "TX (link %p): mux frame to Session (%p): %s", l, l.session, fr) - } - - select { - case err := <-sent: - return err - case <-l.done: - return l.doneErr - case <-l.session.done: - return l.session.doneErr - } -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/link_options.go b/sdk/messaging/azservicebus/internal/go-amqp/link_options.go deleted file mode 100644 index 31daa94eb9f6..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/link_options.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" -) - -type SenderOptions struct { - // Capabilities is the list of extension capabilities the sender supports. - Capabilities []string - - // Durability indicates what state of the sender will be retained durably. - // - // Default: DurabilityNone. - Durability Durability - - // DynamicAddress indicates a dynamic address is to be used. - // Any specified address will be ignored. - // - // Default: false. - DynamicAddress bool - - // ExpiryPolicy determines when the expiry timer of the sender starts counting - // down from the timeout value. If the link is subsequently re-attached before - // the timeout is reached, the count down is aborted. - // - // Default: ExpirySessionEnd. - ExpiryPolicy ExpiryPolicy - - // ExpiryTimeout is the duration in seconds that the sender will be retained. - // - // Default: 0. - ExpiryTimeout uint32 - - // Name sets the name of the link. 
- // - // Link names must be unique per-connection and direction. - // - // Default: randomly generated. - Name string - - // Properties sets an entry in the link properties map sent to the server. - Properties map[string]any - - // RequestedReceiverSettleMode sets the requested receiver settlement mode. - // - // If a settlement mode is explicitly set and the server does not - // honor it an error will be returned during link attachment. - // - // Default: Accept the settlement mode set by the server, commonly ModeFirst. - RequestedReceiverSettleMode *ReceiverSettleMode - - // SettlementMode sets the settlement mode in use by this sender. - // - // Default: ModeMixed. - SettlementMode *SenderSettleMode - - // SourceAddress specifies the source address for this sender. - SourceAddress string - - // TargetCapabilities is the list of extension capabilities the sender desires. - TargetCapabilities []string - - // TargetDurability indicates what state of the peer will be retained durably. - // - // Default: DurabilityNone. - TargetDurability Durability - - // TargetExpiryPolicy determines when the expiry timer of the peer starts counting - // down from the timeout value. If the link is subsequently re-attached before - // the timeout is reached, the count down is aborted. - // - // Default: ExpirySessionEnd. - TargetExpiryPolicy ExpiryPolicy - - // TargetExpiryTimeout is the duration in seconds that the peer will be retained. - // - // Default: 0. - TargetExpiryTimeout uint32 -} - -type ReceiverOptions struct { - // Capabilities is the list of extension capabilities the receiver supports. - Capabilities []string - - // Credit specifies the maximum number of unacknowledged messages - // the sender can transmit. Once this limit is reached, no more messages - // will arrive until messages are acknowledged and settled. - // - // As messages are settled, any available credit will automatically be issued. 
- // - // Setting this to -1 requires manual management of link credit. - // Credits can be added with IssueCredit(), and links can also be - // drained with DrainCredit(). - // This should only be enabled when complete control of the link's - // flow control is required. - // - // Default: 1. - Credit int32 - - // Durability indicates what state of the receiver will be retained durably. - // - // Default: DurabilityNone. - Durability Durability - - // DynamicAddress indicates a dynamic address is to be used. - // Any specified address will be ignored. - // - // Default: false. - DynamicAddress bool - - // ExpiryPolicy determines when the expiry timer of the sender starts counting - // down from the timeout value. If the link is subsequently re-attached before - // the timeout is reached, the count down is aborted. - // - // Default: ExpirySessionEnd. - ExpiryPolicy ExpiryPolicy - - // ExpiryTimeout is the duration in seconds that the sender will be retained. - // - // Default: 0. - ExpiryTimeout uint32 - - // Filters contains the desired filters for this receiver. - // If the peer cannot fulfill the filters the link will be detached. - Filters []LinkFilter - - // MaxMessageSize sets the maximum message size that can - // be received on the link. - // - // A size of zero indicates no limit. - // - // Default: 0. - MaxMessageSize uint64 - - // Name sets the name of the link. - // - // Link names must be unique per-connection and direction. - // - // Default: randomly generated. - Name string - - // Properties sets an entry in the link properties map sent to the server. - Properties map[string]any - - // RequestedSenderSettleMode sets the requested sender settlement mode. - // - // If a settlement mode is explicitly set and the server does not - // honor it an error will be returned during link attachment. - // - // Default: Accept the settlement mode set by the server, commonly ModeMixed. 
- RequestedSenderSettleMode *SenderSettleMode - - // SettlementMode sets the settlement mode in use by this receiver. - // - // Default: ModeFirst. - SettlementMode *ReceiverSettleMode - - // TargetAddress specifies the target address for this receiver. - TargetAddress string - - // SourceCapabilities is the list of extension capabilities the receiver desires. - SourceCapabilities []string - - // SourceDurability indicates what state of the peer will be retained durably. - // - // Default: DurabilityNone. - SourceDurability Durability - - // SourceExpiryPolicy determines when the expiry timer of the peer starts counting - // down from the timeout value. If the link is subsequently re-attached before - // the timeout is reached, the count down is aborted. - // - // Default: ExpirySessionEnd. - SourceExpiryPolicy ExpiryPolicy - - // SourceExpiryTimeout is the duration in seconds that the peer will be retained. - // - // Default: 0. - SourceExpiryTimeout uint32 -} - -// LinkFilter is an advanced API for setting non-standard source filters. -// Please file an issue or open a PR if a standard filter is missing from this -// library. -// -// The name is the key for the filter map. It will be encoded as an AMQP symbol type. -// -// The code is the descriptor of the described type value. The domain-id and descriptor-id -// should be concatenated together. If 0 is passed as the code, the name will be used as -// the descriptor. -// -// The value is the value of the descriped types. Acceptable types for value are specific -// to the filter. -// -// Example: -// -// The standard selector-filter is defined as: -// -// -// -// In this case the name is "apache.org:selector-filter:string" and the code is -// 0x0000468C00000004. 
-// -// LinkSourceFilter("apache.org:selector-filter:string", 0x0000468C00000004, exampleValue) -// -// References: -// -// http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-messaging-v1.0-os.html#type-filter-set -// http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-types-v1.0-os.html#section-descriptor-values -type LinkFilter func(encoding.Filter) - -// NewLinkFilter creates a new LinkFilter with the specified values. -// Any preexisting link filter with the same name will be updated with the new code and value. -func NewLinkFilter(name string, code uint64, value any) LinkFilter { - return func(f encoding.Filter) { - var descriptor any - if code != 0 { - descriptor = code - } else { - descriptor = encoding.Symbol(name) - } - f[encoding.Symbol(name)] = &encoding.DescribedType{ - Descriptor: descriptor, - Value: value, - } - } -} - -// NewSelectorFilter creates a new selector filter (apache.org:selector-filter:string) with the specified filter value. -// Any preexisting selector filter will be updated with the new filter value. -func NewSelectorFilter(filter string) LinkFilter { - return NewLinkFilter(selectorFilter, selectorFilterCode, filter) -} - -const ( - selectorFilter = "apache.org:selector-filter:string" - selectorFilterCode = uint64(0x0000468C00000004) -) diff --git a/sdk/messaging/azservicebus/internal/go-amqp/message.go b/sdk/messaging/azservicebus/internal/go-amqp/message.go deleted file mode 100644 index 404a3cc186da..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/message.go +++ /dev/null @@ -1,492 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "fmt" - "time" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" -) - -// Message is an AMQP message. 
-type Message struct { - // Message format code. - // - // The upper three octets of a message format code identify a particular message - // format. The lowest octet indicates the version of said message format. Any - // given version of a format is forwards compatible with all higher versions. - Format uint32 - - // The DeliveryTag can be up to 32 octets of binary data. - // Note that when mode one is enabled there will be no delivery tag. - DeliveryTag []byte - - // The header section carries standard delivery details about the transfer - // of a message through the AMQP network. - Header *MessageHeader - // If the header section is omitted the receiver MUST assume the appropriate - // default values (or the meaning implied by no value being set) for the - // fields within the header unless other target or node specific defaults - // have otherwise been set. - - // The delivery-annotations section is used for delivery-specific non-standard - // properties at the head of the message. Delivery annotations convey information - // from the sending peer to the receiving peer. - DeliveryAnnotations Annotations - // If the recipient does not understand the annotation it cannot be acted upon - // and its effects (such as any implied propagation) cannot be acted upon. - // Annotations might be specific to one implementation, or common to multiple - // implementations. The capabilities negotiated on link attach and on the source - // and target SHOULD be used to establish which annotations a peer supports. A - // registry of defined annotations and their meanings is maintained [AMQPDELANN]. - // The symbolic key "rejected" is reserved for the use of communicating error - // information regarding rejected messages. Any values associated with the - // "rejected" key MUST be of type error. - // - // If the delivery-annotations section is omitted, it is equivalent to a - // delivery-annotations section containing an empty map of annotations. 
- - // The message-annotations section is used for properties of the message which - // are aimed at the infrastructure. - Annotations Annotations - // The message-annotations section is used for properties of the message which - // are aimed at the infrastructure and SHOULD be propagated across every - // delivery step. Message annotations convey information about the message. - // Intermediaries MUST propagate the annotations unless the annotations are - // explicitly augmented or modified (e.g., by the use of the modified outcome). - // - // The capabilities negotiated on link attach and on the source and target can - // be used to establish which annotations a peer understands; however, in a - // network of AMQP intermediaries it might not be possible to know if every - // intermediary will understand the annotation. Note that for some annotations - // it might not be necessary for the intermediary to understand their purpose, - // i.e., they could be used purely as an attribute which can be filtered on. - // - // A registry of defined annotations and their meanings is maintained [AMQPMESSANN]. - // - // If the message-annotations section is omitted, it is equivalent to a - // message-annotations section containing an empty map of annotations. - - // The properties section is used for a defined set of standard properties of - // the message. - Properties *MessageProperties - // The properties section is part of the bare message; therefore, - // if retransmitted by an intermediary, it MUST remain unaltered. - - // The application-properties section is a part of the bare message used for - // structured application data. Intermediaries can use the data within this - // structure for the purposes of filtering or routing. 
- ApplicationProperties map[string]any - // The keys of this map are restricted to be of type string (which excludes - // the possibility of a null key) and the values are restricted to be of - // simple types only, that is, excluding map, list, and array types. - - // Data payloads. - // A data section contains opaque binary data. - Data [][]byte - - // Value payload. - // An amqp-value section contains a single AMQP value. - Value any - - // Sequence will contain AMQP sequence sections from the body of the message. - // An amqp-sequence section contains an AMQP sequence. - Sequence [][]any - - // The footer section is used for details about the message or delivery which - // can only be calculated or evaluated once the whole bare message has been - // constructed or seen (for example message hashes, HMACs, signatures and - // encryption details). - Footer Annotations - - deliveryID uint32 // used when sending disposition - settled bool // whether transfer was settled by sender -} - -// NewMessage returns a *Message with data as the payload. -// -// This constructor is intended as a helper for basic Messages with a -// single data payload. It is valid to construct a Message directly for -// more complex usages. -func NewMessage(data []byte) *Message { - return &Message{ - Data: [][]byte{data}, - } -} - -// GetData returns the first []byte from the Data field -// or nil if Data is empty. -func (m *Message) GetData() []byte { - if len(m.Data) < 1 { - return nil - } - return m.Data[0] -} - -// MarshalBinary encodes the message into binary form. 
-func (m *Message) MarshalBinary() ([]byte, error) { - buf := &buffer.Buffer{} - err := m.Marshal(buf) - return buf.Detach(), err -} - -func (m *Message) Marshal(wr *buffer.Buffer) error { - if m.Header != nil { - err := m.Header.Marshal(wr) - if err != nil { - return err - } - } - - if m.DeliveryAnnotations != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeDeliveryAnnotations) - err := encoding.Marshal(wr, m.DeliveryAnnotations) - if err != nil { - return err - } - } - - if m.Annotations != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeMessageAnnotations) - err := encoding.Marshal(wr, m.Annotations) - if err != nil { - return err - } - } - - if m.Properties != nil { - err := encoding.Marshal(wr, m.Properties) - if err != nil { - return err - } - } - - if m.ApplicationProperties != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeApplicationProperties) - err := encoding.Marshal(wr, m.ApplicationProperties) - if err != nil { - return err - } - } - - for _, data := range m.Data { - encoding.WriteDescriptor(wr, encoding.TypeCodeApplicationData) - err := encoding.WriteBinary(wr, data) - if err != nil { - return err - } - } - - if m.Value != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeAMQPValue) - err := encoding.Marshal(wr, m.Value) - if err != nil { - return err - } - } - - if m.Sequence != nil { - // the body can basically be one of three different types (value, data or sequence). - // When it's sequence it's actually _several_ sequence sections, one for each sub-array. - for _, v := range m.Sequence { - encoding.WriteDescriptor(wr, encoding.TypeCodeAMQPSequence) - err := encoding.Marshal(wr, v) - if err != nil { - return err - } - } - } - - if m.Footer != nil { - encoding.WriteDescriptor(wr, encoding.TypeCodeFooter) - err := encoding.Marshal(wr, m.Footer) - if err != nil { - return err - } - } - - return nil -} - -// UnmarshalBinary decodes the message from binary form. 
-func (m *Message) UnmarshalBinary(data []byte) error { - buf := buffer.New(data) - return m.Unmarshal(buf) -} - -func (m *Message) Unmarshal(r *buffer.Buffer) error { - // loop, decoding sections until bytes have been consumed - for r.Len() > 0 { - // determine type - type_, headerLength, err := encoding.PeekMessageType(r.Bytes()) - if err != nil { - return err - } - - var ( - section any - // section header is read from r before - // unmarshaling section is set to true - discardHeader = true - ) - switch encoding.AMQPType(type_) { - - case encoding.TypeCodeMessageHeader: - discardHeader = false - section = &m.Header - - case encoding.TypeCodeDeliveryAnnotations: - section = &m.DeliveryAnnotations - - case encoding.TypeCodeMessageAnnotations: - section = &m.Annotations - - case encoding.TypeCodeMessageProperties: - discardHeader = false - section = &m.Properties - - case encoding.TypeCodeApplicationProperties: - section = &m.ApplicationProperties - - case encoding.TypeCodeApplicationData: - r.Skip(int(headerLength)) - - var data []byte - err = encoding.Unmarshal(r, &data) - if err != nil { - return err - } - - m.Data = append(m.Data, data) - continue - - case encoding.TypeCodeAMQPSequence: - r.Skip(int(headerLength)) - - var data []any - err = encoding.Unmarshal(r, &data) - if err != nil { - return err - } - - m.Sequence = append(m.Sequence, data) - continue - - case encoding.TypeCodeFooter: - section = &m.Footer - - case encoding.TypeCodeAMQPValue: - section = &m.Value - - default: - return fmt.Errorf("unknown message section %#02x", type_) - } - - if discardHeader { - r.Skip(int(headerLength)) - } - - err = encoding.Unmarshal(r, section) - if err != nil { - return err - } - } - return nil -} - -/* - - - - - - - - -*/ - -// MessageHeader carries standard delivery details about the transfer -// of a message. 
-type MessageHeader struct { - Durable bool - Priority uint8 - TTL time.Duration // from milliseconds - FirstAcquirer bool - DeliveryCount uint32 -} - -func (h *MessageHeader) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeMessageHeader, []encoding.MarshalField{ - {Value: &h.Durable, Omit: !h.Durable}, - {Value: &h.Priority, Omit: h.Priority == 4}, - {Value: (*encoding.Milliseconds)(&h.TTL), Omit: h.TTL == 0}, - {Value: &h.FirstAcquirer, Omit: !h.FirstAcquirer}, - {Value: &h.DeliveryCount, Omit: h.DeliveryCount == 0}, - }) -} - -func (h *MessageHeader) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeMessageHeader, []encoding.UnmarshalField{ - {Field: &h.Durable}, - {Field: &h.Priority, HandleNull: func() error { h.Priority = 4; return nil }}, - {Field: (*encoding.Milliseconds)(&h.TTL)}, - {Field: &h.FirstAcquirer}, - {Field: &h.DeliveryCount}, - }...) -} - -/* - - - - - - - - - - - - - - - - -*/ - -// MessageProperties is the defined set of properties for AMQP messages. -type MessageProperties struct { - // Message-id, if set, uniquely identifies a message within the message system. - // The message producer is usually responsible for setting the message-id in - // such a way that it is assured to be globally unique. A broker MAY discard a - // message as a duplicate if the value of the message-id matches that of a - // previously received message sent to the same node. - // - // The value is restricted to the following types - // - uint64, UUID, []byte, or string - MessageID any - - // The identity of the user responsible for producing the message. - // The client sets this value, and it MAY be authenticated by intermediaries. - UserID []byte - - // The to field identifies the node that is the intended destination of the message. - // On any given transfer this might not be the node at the receiving end of the link. 
- To *string - - // A common field for summary information about the message content and purpose. - Subject *string - - // The address of the node to send replies to. - ReplyTo *string - - // This is a client-specific id that can be used to mark or identify messages - // between clients. - // - // The value is restricted to the following types - // - uint64, UUID, []byte, or string - CorrelationID any - - // The RFC-2046 [RFC2046] MIME type for the message's application-data section - // (body). As per RFC-2046 [RFC2046] this can contain a charset parameter defining - // the character encoding used: e.g., 'text/plain; charset="utf-8"'. - // - // For clarity, as per section 7.2.1 of RFC-2616 [RFC2616], where the content type - // is unknown the content-type SHOULD NOT be set. This allows the recipient the - // opportunity to determine the actual type. Where the section is known to be truly - // opaque binary data, the content-type SHOULD be set to application/octet-stream. - // - // When using an application-data section with a section code other than data, - // content-type SHOULD NOT be set. - ContentType *string - - // The content-encoding property is used as a modifier to the content-type. - // When present, its value indicates what additional content encodings have been - // applied to the application-data, and thus what decoding mechanisms need to be - // applied in order to obtain the media-type referenced by the content-type header - // field. - // - // Content-encoding is primarily used to allow a document to be compressed without - // losing the identity of its underlying content type. - // - // Content-encodings are to be interpreted as per section 3.5 of RFC 2616 [RFC2616]. - // Valid content-encodings are registered at IANA [IANAHTTPPARAMS]. - // - // The content-encoding MUST NOT be set when the application-data section is other - // than data. 
The binary representation of all other application-data section types - // is defined completely in terms of the AMQP type system. - // - // Implementations MUST NOT use the identity encoding. Instead, implementations - // SHOULD NOT set this property. Implementations SHOULD NOT use the compress encoding, - // except as to remain compatible with messages originally sent with other protocols, - // e.g. HTTP or SMTP. - // - // Implementations SHOULD NOT specify multiple content-encoding values except as to - // be compatible with messages originally sent with other protocols, e.g. HTTP or SMTP. - ContentEncoding *string - - // An absolute time when this message is considered to be expired. - AbsoluteExpiryTime *time.Time - - // An absolute time when this message was created. - CreationTime *time.Time - - // Identifies the group the message belongs to. - GroupID *string - - // The relative position of this message within its group. - // - // The value is defined as a RFC-1982 sequence number - GroupSequence *uint32 - - // This is a client-specific id that is used so that client can send replies to this - // message to a specific group. 
- ReplyToGroupID *string -} - -func (p *MessageProperties) Marshal(wr *buffer.Buffer) error { - return encoding.MarshalComposite(wr, encoding.TypeCodeMessageProperties, []encoding.MarshalField{ - {Value: p.MessageID, Omit: p.MessageID == nil}, - {Value: &p.UserID, Omit: len(p.UserID) == 0}, - {Value: p.To, Omit: p.To == nil}, - {Value: p.Subject, Omit: p.Subject == nil}, - {Value: p.ReplyTo, Omit: p.ReplyTo == nil}, - {Value: p.CorrelationID, Omit: p.CorrelationID == nil}, - {Value: (*encoding.Symbol)(p.ContentType), Omit: p.ContentType == nil}, - {Value: (*encoding.Symbol)(p.ContentEncoding), Omit: p.ContentEncoding == nil}, - {Value: p.AbsoluteExpiryTime, Omit: p.AbsoluteExpiryTime == nil}, - {Value: p.CreationTime, Omit: p.CreationTime == nil}, - {Value: p.GroupID, Omit: p.GroupID == nil}, - {Value: p.GroupSequence, Omit: p.GroupSequence == nil}, - {Value: p.ReplyToGroupID, Omit: p.ReplyToGroupID == nil}, - }) -} - -func (p *MessageProperties) Unmarshal(r *buffer.Buffer) error { - return encoding.UnmarshalComposite(r, encoding.TypeCodeMessageProperties, []encoding.UnmarshalField{ - {Field: &p.MessageID}, - {Field: &p.UserID}, - {Field: &p.To}, - {Field: &p.Subject}, - {Field: &p.ReplyTo}, - {Field: &p.CorrelationID}, - {Field: &p.ContentType}, - {Field: &p.ContentEncoding}, - {Field: &p.AbsoluteExpiryTime}, - {Field: &p.CreationTime}, - {Field: &p.GroupID}, - {Field: &p.GroupSequence}, - {Field: &p.ReplyToGroupID}, - }...) -} - -// Annotations keys must be of type string, int, or int64. -// -// String keys are encoded as AMQP Symbols. -type Annotations = encoding.Annotations - -// UUID is a 128 bit identifier as defined in RFC 4122. 
-type UUID = encoding.UUID diff --git a/sdk/messaging/azservicebus/internal/go-amqp/receiver.go b/sdk/messaging/azservicebus/internal/go-amqp/receiver.go deleted file mode 100644 index 8bcd7b293ca4..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/receiver.go +++ /dev/null @@ -1,897 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "bytes" - "context" - "errors" - "fmt" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/frames" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/queue" -) - -// Default link options -const ( - defaultLinkCredit = 1 -) - -// Receiver receives messages on a single AMQP link. 
-type Receiver struct { - l link - - // message receiving - receiverReady chan struct{} // receiver sends on this when mux is paused to indicate it can handle more messages - messagesQ *queue.Holder[Message] // used to send completed messages to receiver - txDisposition chan frameBodyEnvelope // used to funnel disposition frames through the mux - - unsettledMessages map[string]struct{} // used to keep track of messages being handled downstream - unsettledMessagesLock sync.RWMutex // lock to protect concurrent access to unsettledMessages - msgBuf buffer.Buffer // buffered bytes for current message - more bool // if true, buf contains a partial message - msg Message // current message being decoded - - settlementCount uint32 // the count of settled messages - settlementCountMu sync.Mutex // must be held when accessing settlementCount - - autoSendFlow bool // automatically send flow frames as credit becomes available - inFlight inFlight // used to track message disposition when rcv-settle-mode == second - creditor creditor // manages credits via calls to IssueCredit/DrainCredit -} - -// IssueCredit adds credits to be requested in the next flow request. -// Attempting to issue more credit than the receiver's max credit as -// specified in ReceiverOptions.MaxCredit will result in an error. -func (r *Receiver) IssueCredit(credit uint32) error { - if r.autoSendFlow { - return errors.New("issueCredit can only be used with receiver links using manual credit management") - } - - if err := r.creditor.IssueCredit(credit); err != nil { - return err - } - - // cause mux() to check our flow conditions. - select { - case r.receiverReady <- struct{}{}: - default: - } - - return nil -} - -// Prefetched returns the next message that is stored in the Receiver's -// prefetch cache. It does NOT wait for the remote sender to send messages -// and returns immediately if the prefetch cache is empty. 
To receive from the -// prefetch and wait for messages from the remote Sender use `Receive`. -// -// Once a message is received, and if the sender is configured in any mode other -// than SenderSettleModeSettled, you *must* take an action on the message by calling -// one of the following: AcceptMessage, RejectMessage, ReleaseMessage, ModifyMessage. -func (r *Receiver) Prefetched() *Message { - select { - case r.receiverReady <- struct{}{}: - default: - } - - // non-blocking receive to ensure buffered messages are - // delivered regardless of whether the link has been closed. - q := r.messagesQ.Acquire() - msg := q.Dequeue() - r.messagesQ.Release(q) - - if msg == nil { - return nil - } - - debug.Log(3, "RX (Receiver %p): prefetched delivery ID %d", r, msg.deliveryID) - - if msg.settled { - r.onSettlement(1) - } - - return msg -} - -// ReceiveOptions contains any optional values for the Receiver.Receive method. -type ReceiveOptions struct { - // for future expansion -} - -// Receive returns the next message from the sender. -// Blocks until a message is received, ctx completes, or an error occurs. -// -// Once a message is received, and if the sender is configured in any mode other -// than SenderSettleModeSettled, you *must* take an action on the message by calling -// one of the following: AcceptMessage, RejectMessage, ReleaseMessage, ModifyMessage. 
-func (r *Receiver) Receive(ctx context.Context, opts *ReceiveOptions) (*Message, error) { - if msg := r.Prefetched(); msg != nil { - return msg, nil - } - - // wait for the next message - select { - case q := <-r.messagesQ.Wait(): - msg := q.Dequeue() - debug.Assert(msg != nil) - debug.Log(3, "RX (Receiver %p): received delivery ID %d", r, msg.deliveryID) - r.messagesQ.Release(q) - if msg.settled { - r.onSettlement(1) - } - return msg, nil - case <-r.l.done: - // if the link receives messages and is then closed between the above call to r.Prefetched() - // and this select statement, the order of selecting r.messages and r.l.done is undefined. - // however, once r.l.done is closed the link cannot receive any more messages. so be sure to - // drain any that might have trickled in within this window. - if msg := r.Prefetched(); msg != nil { - return msg, nil - } - return nil, r.l.doneErr - case <-ctx.Done(): - return nil, ctx.Err() - } -} - -// Accept notifies the server that the message has been accepted and does not require redelivery. -// - ctx controls waiting for the peer to acknowledge the disposition -// - msg is the message to accept -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message's disposition is in an unknown state. -func (r *Receiver) AcceptMessage(ctx context.Context, msg *Message) error { - return r.messageDisposition(ctx, msg, &encoding.StateAccepted{}) -} - -// Reject notifies the server that the message is invalid. -// - ctx controls waiting for the peer to acknowledge the disposition -// - msg is the message to reject -// - e is an optional rejection error -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message's disposition is in an unknown state. 
-func (r *Receiver) RejectMessage(ctx context.Context, msg *Message, e *Error) error { - return r.messageDisposition(ctx, msg, &encoding.StateRejected{Error: e}) -} - -// Release releases the message back to the server. The message may be redelivered to this or another consumer. -// - ctx controls waiting for the peer to acknowledge the disposition -// - msg is the message to release -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message's disposition is in an unknown state. -func (r *Receiver) ReleaseMessage(ctx context.Context, msg *Message) error { - return r.messageDisposition(ctx, msg, &encoding.StateReleased{}) -} - -// Modify notifies the server that the message was not acted upon and should be modifed. -// - ctx controls waiting for the peer to acknowledge the disposition -// - msg is the message to modify -// - options contains the optional settings to modify -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message's disposition is in an unknown state. -func (r *Receiver) ModifyMessage(ctx context.Context, msg *Message, options *ModifyMessageOptions) error { - if options == nil { - options = &ModifyMessageOptions{} - } - return r.messageDisposition(ctx, - msg, &encoding.StateModified{ - DeliveryFailed: options.DeliveryFailed, - UndeliverableHere: options.UndeliverableHere, - MessageAnnotations: options.Annotations, - }) -} - -// ModifyMessageOptions contains the optional parameters to ModifyMessage. -type ModifyMessageOptions struct { - // DeliveryFailed indicates that the server must consider this an - // unsuccessful delivery attempt and increment the delivery count. - DeliveryFailed bool - - // UndeliverableHere indicates that the server must not redeliver - // the message to this link. 
- UndeliverableHere bool - - // Annotations is an optional annotation map to be merged - // with the existing message annotations, overwriting existing keys - // if necessary. - Annotations Annotations -} - -// Address returns the link's address. -func (r *Receiver) Address() string { - if r.l.source == nil { - return "" - } - return r.l.source.Address -} - -// LinkName returns associated link name or an empty string if link is not defined. -func (r *Receiver) LinkName() string { - return r.l.key.name -} - -// LinkSourceFilterValue retrieves the specified link source filter value or nil if it doesn't exist. -func (r *Receiver) LinkSourceFilterValue(name string) any { - if r.l.source == nil { - return nil - } - filter, ok := r.l.source.Filter[encoding.Symbol(name)] - if !ok { - return nil - } - return filter.Value -} - -// Close closes the Receiver and AMQP link. -// - ctx controls waiting for the peer to acknowledge the close -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. However, the operation will continue to -// execute in the background. Subsequent calls will return a *LinkError -// that contains the context's error message. 
-func (r *Receiver) Close(ctx context.Context) error { - return r.l.closeLink(ctx) -} - -// sendDisposition sends a disposition frame to the peer -func (r *Receiver) sendDisposition(ctx context.Context, first uint32, last *uint32, state encoding.DeliveryState) error { - fr := &frames.PerformDisposition{ - Role: encoding.RoleReceiver, - First: first, - Last: last, - Settled: r.l.receiverSettleMode == nil || *r.l.receiverSettleMode == ReceiverSettleModeFirst, - State: state, - } - - sent := make(chan error, 1) - select { - case r.txDisposition <- frameBodyEnvelope{Ctx: ctx, FrameBody: fr, Sent: sent}: - debug.Log(2, "TX (Receiver %p): mux txDisposition %s", r, fr) - case <-r.l.done: - return r.l.doneErr - } - - select { - case err := <-sent: - return err - case <-r.l.done: - return r.l.doneErr - } -} - -func (r *Receiver) messageDisposition(ctx context.Context, msg *Message, state encoding.DeliveryState) error { - if msg.settled { - return nil - } - - // NOTE: we MUST add to the in-flight map before sending the disposition. if not, it's possible - // to receive the ack'ing disposition frame *before* the in-flight map has been updated which - // will cause the below <-wait to never trigger. - - var wait chan error - if r.l.receiverSettleMode != nil && *r.l.receiverSettleMode == ReceiverSettleModeSecond { - debug.Log(3, "TX (Receiver %p): delivery ID %d is in flight", r, msg.deliveryID) - wait = r.inFlight.add(msg) - } - - if err := r.sendDisposition(ctx, msg.deliveryID, nil, state); err != nil { - return err - } - - if wait == nil { - // mode first, there will be no settlement ack - r.deleteUnsettled(msg) - r.onSettlement(1) - return nil - } - - select { - case err := <-wait: - // err has three possibilities - // - nil, meaning the peer acknowledged the settlement - // - an *Error, meaning the peer rejected the message with a provided error - // - a non-AMQP error. this comes from calls to inFlight.clear() during mux unwind. 
- // only for the first two cases is the message considered settled - - if amqpErr := (&Error{}); err == nil || errors.As(err, &amqpErr) { - debug.Log(3, "RX (Receiver %p): delivery ID %d has been settled", r, msg.deliveryID) - // we've received confirmation of disposition - return err - } - - debug.Log(3, "RX (Receiver %p): error settling delivery ID %d: %v", r, msg.deliveryID, err) - return err - - case <-ctx.Done(): - // didn't receive the ack in the time allotted, leave message as unsettled - // TODO: if the ack arrives later, we need to remove the message from the unsettled map and reclaim the credit - return ctx.Err() - } -} - -// onSettlement is to be called after message settlement. -// - count is the number of messages that were settled -func (r *Receiver) onSettlement(count uint32) { - if !r.autoSendFlow { - return - } - - r.settlementCountMu.Lock() - r.settlementCount += count - r.settlementCountMu.Unlock() - - select { - case r.receiverReady <- struct{}{}: - // woke up - default: - // wake pending - } -} - -func (r *Receiver) addUnsettled(msg *Message) { - r.unsettledMessagesLock.Lock() - r.unsettledMessages[string(msg.DeliveryTag)] = struct{}{} - r.unsettledMessagesLock.Unlock() -} - -func (r *Receiver) deleteUnsettled(msg *Message) { - r.unsettledMessagesLock.Lock() - delete(r.unsettledMessages, string(msg.DeliveryTag)) - r.unsettledMessagesLock.Unlock() -} - -func (r *Receiver) countUnsettled() int { - r.unsettledMessagesLock.RLock() - count := len(r.unsettledMessages) - r.unsettledMessagesLock.RUnlock() - return count -} - -func newReceiver(source string, session *Session, opts *ReceiverOptions) (*Receiver, error) { - l := newLink(session, encoding.RoleReceiver) - l.source = &frames.Source{Address: source} - l.target = new(frames.Target) - l.linkCredit = defaultLinkCredit - r := &Receiver{ - l: l, - autoSendFlow: true, - receiverReady: make(chan struct{}, 1), - txDisposition: make(chan frameBodyEnvelope), - } - - r.messagesQ = 
queue.NewHolder(queue.New[Message](int(session.incomingWindow))) - - if opts == nil { - return r, nil - } - - for _, v := range opts.Capabilities { - r.l.target.Capabilities = append(r.l.target.Capabilities, encoding.Symbol(v)) - } - if opts.Credit > 0 { - r.l.linkCredit = uint32(opts.Credit) - } else if opts.Credit < 0 { - r.l.linkCredit = 0 - r.autoSendFlow = false - } - if opts.Durability > DurabilityUnsettledState { - return nil, fmt.Errorf("invalid Durability %d", opts.Durability) - } - r.l.target.Durable = opts.Durability - if opts.DynamicAddress { - r.l.source.Address = "" - r.l.dynamicAddr = opts.DynamicAddress - } - if opts.ExpiryPolicy != "" { - if err := encoding.ValidateExpiryPolicy(opts.ExpiryPolicy); err != nil { - return nil, err - } - r.l.target.ExpiryPolicy = opts.ExpiryPolicy - } - r.l.target.Timeout = opts.ExpiryTimeout - if opts.Filters != nil { - r.l.source.Filter = make(encoding.Filter) - for _, f := range opts.Filters { - f(r.l.source.Filter) - } - } - if opts.MaxMessageSize > 0 { - r.l.maxMessageSize = opts.MaxMessageSize - } - if opts.Name != "" { - r.l.key.name = opts.Name - } - if opts.Properties != nil { - r.l.properties = make(map[encoding.Symbol]any) - for k, v := range opts.Properties { - if k == "" { - return nil, errors.New("link property key must not be empty") - } - r.l.properties[encoding.Symbol(k)] = v - } - } - if opts.RequestedSenderSettleMode != nil { - if rsm := *opts.RequestedSenderSettleMode; rsm > SenderSettleModeMixed { - return nil, fmt.Errorf("invalid RequestedSenderSettleMode %d", rsm) - } - r.l.senderSettleMode = opts.RequestedSenderSettleMode - } - if opts.SettlementMode != nil { - if rsm := *opts.SettlementMode; rsm > ReceiverSettleModeSecond { - return nil, fmt.Errorf("invalid SettlementMode %d", rsm) - } - r.l.receiverSettleMode = opts.SettlementMode - } - r.l.target.Address = opts.TargetAddress - for _, v := range opts.SourceCapabilities { - r.l.source.Capabilities = append(r.l.source.Capabilities, 
encoding.Symbol(v)) - } - if opts.SourceDurability != DurabilityNone { - r.l.source.Durable = opts.SourceDurability - } - if opts.SourceExpiryPolicy != ExpiryPolicySessionEnd { - r.l.source.ExpiryPolicy = opts.SourceExpiryPolicy - } - if opts.SourceExpiryTimeout != 0 { - r.l.source.Timeout = opts.SourceExpiryTimeout - } - return r, nil -} - -// attach sends the Attach performative to establish the link with its parent session. -// this is automatically called by the new*Link constructors. -func (r *Receiver) attach(ctx context.Context) error { - if err := r.l.attach(ctx, func(pa *frames.PerformAttach) { - pa.Role = encoding.RoleReceiver - if pa.Source == nil { - pa.Source = new(frames.Source) - } - pa.Source.Dynamic = r.l.dynamicAddr - }, func(pa *frames.PerformAttach) { - if r.l.source == nil { - r.l.source = new(frames.Source) - } - // if dynamic address requested, copy assigned name to address - if r.l.dynamicAddr && pa.Source != nil { - r.l.source.Address = pa.Source.Address - } - // deliveryCount is a sequence number, must initialize to sender's initial sequence number - r.l.deliveryCount = pa.InitialDeliveryCount - r.unsettledMessages = map[string]struct{}{} - // copy the received filter values - if pa.Source != nil { - r.l.source.Filter = pa.Source.Filter - } - }); err != nil { - return err - } - - return nil -} - -func nop() {} - -type receiverTestHooks struct { - MuxStart func() - MuxSelect func() -} - -func (r *Receiver) mux(hooks receiverTestHooks) { - if hooks.MuxSelect == nil { - hooks.MuxSelect = nop - } - if hooks.MuxStart == nil { - hooks.MuxStart = nop - } - - defer func() { - // unblock any in flight message dispositions - r.inFlight.clear(r.l.doneErr) - - if !r.autoSendFlow { - // unblock any pending drain requests - r.creditor.EndDrain() - } - - close(r.l.done) - }() - - hooks.MuxStart() - - if r.autoSendFlow { - r.l.doneErr = r.muxFlow(r.l.linkCredit, false) - } - - for { - msgLen := r.messagesQ.Len() - - r.settlementCountMu.Lock() - // counter 
that accumulates the settled delivery count. - // once the threshold has been reached, the counter is - // reset and a flow frame is sent. - previousSettlementCount := r.settlementCount - if previousSettlementCount >= r.l.linkCredit { - r.settlementCount = 0 - } - r.settlementCountMu.Unlock() - - // once we have pending credit equal to or greater than our available credit, reclaim it. - // we do this instead of settlementCount > 0 to prevent flow frames from being too chatty. - // NOTE: we compare the settlementCount against the current link credit instead of some - // fixed threshold to ensure credit is reclaimed in cases where the number of unsettled - // messages remains high for whatever reason. - if r.autoSendFlow && previousSettlementCount > 0 && previousSettlementCount >= r.l.linkCredit { - debug.Log(1, "RX (Receiver %p) (auto): source: %q, inflight: %d, linkCredit: %d, deliveryCount: %d, messages: %d, unsettled: %d, settlementCount: %d, settleMode: %s", - r, r.l.source.Address, r.inFlight.len(), r.l.linkCredit, r.l.deliveryCount, msgLen, r.countUnsettled(), previousSettlementCount, r.l.receiverSettleMode.String()) - r.l.doneErr = r.creditor.IssueCredit(previousSettlementCount) - } else if r.l.linkCredit == 0 { - debug.Log(1, "RX (Receiver %p) (pause): source: %q, inflight: %d, linkCredit: %d, deliveryCount: %d, messages: %d, unsettled: %d, settlementCount: %d, settleMode: %s", - r, r.l.source.Address, r.inFlight.len(), r.l.linkCredit, r.l.deliveryCount, msgLen, r.countUnsettled(), previousSettlementCount, r.l.receiverSettleMode.String()) - } - - if r.l.doneErr != nil { - return - } - - drain, credits := r.creditor.FlowBits(r.l.linkCredit) - if drain || credits > 0 { - debug.Log(1, "RX (Receiver %p) (flow): source: %q, inflight: %d, curLinkCredit: %d, newLinkCredit: %d, drain: %v, deliveryCount: %d, messages: %d, unsettled: %d, settlementCount: %d, settleMode: %s", - r, r.l.source.Address, r.inFlight.len(), r.l.linkCredit, credits, drain, r.l.deliveryCount, 
msgLen, r.countUnsettled(), previousSettlementCount, r.l.receiverSettleMode.String()) - - // send a flow frame. - r.l.doneErr = r.muxFlow(credits, drain) - } - - if r.l.doneErr != nil { - return - } - - txDisposition := r.txDisposition - closed := r.l.close - if r.l.closeInProgress { - // swap out channel so it no longer triggers - closed = nil - - // disable sending of disposition frames once closing is in progress. - // this is to prevent races between mux shutdown and clearing of - // any in-flight dispositions. - txDisposition = nil - } - - hooks.MuxSelect() - - select { - case q := <-r.l.rxQ.Wait(): - // populated queue - fr := *q.Dequeue() - r.l.rxQ.Release(q) - - // if muxHandleFrame returns an error it means the mux must terminate. - // note that in the case of a client-side close due to an error, nil - // is returned in order to keep the mux running to ack the detach frame. - if err := r.muxHandleFrame(fr); err != nil { - r.l.doneErr = err - return - } - - case env := <-txDisposition: - r.l.txFrame(env.Ctx, env.FrameBody, env.Sent) - - case <-r.receiverReady: - continue - - case <-closed: - if r.l.closeInProgress { - // a client-side close due to protocol error is in progress - continue - } - - // receiver is being closed by the client - r.l.closeInProgress = true - fr := &frames.PerformDetach{ - Handle: r.l.handle, - Closed: true, - } - r.l.txFrame(context.Background(), fr, nil) - - case <-r.l.session.done: - r.l.doneErr = r.l.session.doneErr - return - } - } -} - -// muxFlow sends tr to the session mux. -// l.linkCredit will also be updated to `linkCredit` -func (r *Receiver) muxFlow(linkCredit uint32, drain bool) error { - var ( - deliveryCount = r.l.deliveryCount - ) - - fr := &frames.PerformFlow{ - Handle: &r.l.handle, - DeliveryCount: &deliveryCount, - LinkCredit: &linkCredit, // max number of messages, - Drain: drain, - } - - // Update credit. 
This must happen before entering loop below - // because incoming messages handled while waiting to transmit - // flow increment deliveryCount. This causes the credit to become - // out of sync with the server. - - if !drain { - // if we're draining we don't want to touch our internal credit - we're not changing it so any issued credits - // are still valid until drain completes, at which point they will be naturally zeroed. - r.l.linkCredit = linkCredit - } - - select { - case r.l.session.tx <- frameBodyEnvelope{Ctx: context.Background(), FrameBody: fr}: - debug.Log(2, "TX (Receiver %p): mux frame to Session (%p): %d, %s", r, r.l.session, r.l.session.channel, fr) - return nil - case <-r.l.close: - return nil - case <-r.l.session.done: - return r.l.session.doneErr - } -} - -// muxHandleFrame processes fr based on type. -func (r *Receiver) muxHandleFrame(fr frames.FrameBody) error { - debug.Log(2, "RX (Receiver %p): %s", r, fr) - switch fr := fr.(type) { - // message frame - case *frames.PerformTransfer: - r.muxReceive(*fr) - - // flow control frame - case *frames.PerformFlow: - if !fr.Echo { - // if the 'drain' flag has been set in the frame sent to the _receiver_ then - // we signal whomever is waiting (the service has seen and acknowledged our drain) - if fr.Drain && !r.autoSendFlow { - r.l.linkCredit = 0 // we have no active credits at this point. 
- r.creditor.EndDrain() - } - return nil - } - - var ( - // copy because sent by pointer below; prevent race - linkCredit = r.l.linkCredit - deliveryCount = r.l.deliveryCount - ) - - // send flow - resp := &frames.PerformFlow{ - Handle: &r.l.handle, - DeliveryCount: &deliveryCount, - LinkCredit: &linkCredit, // max number of messages - } - - select { - case r.l.session.tx <- frameBodyEnvelope{Ctx: context.Background(), FrameBody: resp}: - debug.Log(2, "TX (Receiver %p): mux frame to Session (%p): %d, %s", r, r.l.session, r.l.session.channel, resp) - case <-r.l.close: - return nil - case <-r.l.session.done: - return r.l.session.doneErr - } - - case *frames.PerformDisposition: - // Unblock receivers waiting for message disposition - // bubble disposition error up to the receiver - var dispositionError error - if state, ok := fr.State.(*encoding.StateRejected); ok { - // state.Error isn't required to be filled out. For instance if you dead letter a message - // you will get a rejected response that doesn't contain an error. 
- if state.Error != nil { - dispositionError = state.Error - } - } - // removal from the in-flight map will also remove the message from the unsettled map - count := r.inFlight.remove(fr.First, fr.Last, dispositionError, func(msg *Message) { - r.deleteUnsettled(msg) - msg.settled = true - }) - r.onSettlement(count) - - default: - return r.l.muxHandleFrame(fr) - } - - return nil -} - -func (r *Receiver) muxReceive(fr frames.PerformTransfer) { - if !r.more { - // this is the first transfer of a message, - // record the delivery ID, message format, - // and delivery Tag - if fr.DeliveryID != nil { - r.msg.deliveryID = *fr.DeliveryID - } - if fr.MessageFormat != nil { - r.msg.Format = *fr.MessageFormat - } - r.msg.DeliveryTag = fr.DeliveryTag - - // these fields are required on first transfer of a message - if fr.DeliveryID == nil { - r.l.closeWithError(ErrCondNotAllowed, "received message without a delivery-id") - return - } - if fr.MessageFormat == nil { - r.l.closeWithError(ErrCondNotAllowed, "received message without a message-format") - return - } - if fr.DeliveryTag == nil { - r.l.closeWithError(ErrCondNotAllowed, "received message without a delivery-tag") - return - } - } else { - // this is a continuation of a multipart message - // some fields may be omitted on continuation transfers, - // but if they are included they must be consistent - // with the first. 
- - if fr.DeliveryID != nil && *fr.DeliveryID != r.msg.deliveryID { - msg := fmt.Sprintf( - "received continuation transfer with inconsistent delivery-id: %d != %d", - *fr.DeliveryID, r.msg.deliveryID, - ) - r.l.closeWithError(ErrCondNotAllowed, msg) - return - } - if fr.MessageFormat != nil && *fr.MessageFormat != r.msg.Format { - msg := fmt.Sprintf( - "received continuation transfer with inconsistent message-format: %d != %d", - *fr.MessageFormat, r.msg.Format, - ) - r.l.closeWithError(ErrCondNotAllowed, msg) - return - } - if fr.DeliveryTag != nil && !bytes.Equal(fr.DeliveryTag, r.msg.DeliveryTag) { - msg := fmt.Sprintf( - "received continuation transfer with inconsistent delivery-tag: %q != %q", - fr.DeliveryTag, r.msg.DeliveryTag, - ) - r.l.closeWithError(ErrCondNotAllowed, msg) - return - } - } - - // discard message if it's been aborted - if fr.Aborted { - r.msgBuf.Reset() - r.msg = Message{} - r.more = false - return - } - - // ensure maxMessageSize will not be exceeded - if r.l.maxMessageSize != 0 && uint64(r.msgBuf.Len())+uint64(len(fr.Payload)) > r.l.maxMessageSize { - r.l.closeWithError(ErrCondMessageSizeExceeded, fmt.Sprintf("received message larger than max size of %d", r.l.maxMessageSize)) - return - } - - // add the payload the the buffer - r.msgBuf.Append(fr.Payload) - - // mark as settled if at least one frame is settled - r.msg.settled = r.msg.settled || fr.Settled - - // save in-progress status - r.more = fr.More - - if fr.More { - return - } - - // last frame in message - err := r.msg.Unmarshal(&r.msgBuf) - if err != nil { - r.l.closeWithError(ErrCondInternalError, err.Error()) - return - } - - // send to receiver - if !r.msg.settled { - r.addUnsettled(&r.msg) - debug.Log(3, "RX (Receiver %p): add unsettled delivery ID %d", r, r.msg.deliveryID) - } - - q := r.messagesQ.Acquire() - q.Enqueue(r.msg) - msgLen := q.Len() - r.messagesQ.Release(q) - - // reset progress - r.msgBuf.Reset() - r.msg = Message{} - - // decrement link-credit after entire 
message received - r.l.deliveryCount++ - r.l.linkCredit-- - debug.Log(3, "RX (Receiver %p) link %s - deliveryCount: %d, linkCredit: %d, len(messages): %d", r, r.l.key.name, r.l.deliveryCount, r.l.linkCredit, msgLen) -} - -// inFlight tracks in-flight message dispositions allowing receivers -// to block waiting for the server to respond when an appropriate -// settlement mode is configured. -type inFlight struct { - mu sync.RWMutex - m map[uint32]inFlightInfo -} - -type inFlightInfo struct { - wait chan error - msg *Message -} - -func (f *inFlight) add(msg *Message) chan error { - wait := make(chan error, 1) - - f.mu.Lock() - if f.m == nil { - f.m = make(map[uint32]inFlightInfo) - } - - f.m[msg.deliveryID] = inFlightInfo{wait: wait, msg: msg} - f.mu.Unlock() - - return wait -} - -func (f *inFlight) remove(first uint32, last *uint32, err error, handler func(*Message)) uint32 { - f.mu.Lock() - - if f.m == nil { - f.mu.Unlock() - return 0 - } - - ll := first - if last != nil { - ll = *last - } - - count := uint32(0) - for i := first; i <= ll; i++ { - info, ok := f.m[i] - if ok { - handler(info.msg) - info.wait <- err - delete(f.m, i) - count++ - } - } - - f.mu.Unlock() - return count -} - -func (f *inFlight) clear(err error) { - f.mu.Lock() - for id, info := range f.m { - info.wait <- err - delete(f.m, id) - } - f.mu.Unlock() -} - -func (f *inFlight) len() int { - f.mu.RLock() - defer f.mu.RUnlock() - return len(f.m) -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/sasl.go b/sdk/messaging/azservicebus/internal/go-amqp/sasl.go deleted file mode 100644 index c50ecdcbd83e..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/sasl.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "fmt" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/debug" - 
"github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/frames" -) - -// SASL Mechanisms -const ( - saslMechanismPLAIN encoding.Symbol = "PLAIN" - saslMechanismANONYMOUS encoding.Symbol = "ANONYMOUS" - saslMechanismEXTERNAL encoding.Symbol = "EXTERNAL" - saslMechanismXOAUTH2 encoding.Symbol = "XOAUTH2" -) - -// SASLType represents a SASL configuration to use during authentication. -type SASLType func(c *Conn) error - -// ConnSASLPlain enables SASL PLAIN authentication for the connection. -// -// SASL PLAIN transmits credentials in plain text and should only be used -// on TLS/SSL enabled connection. -func SASLTypePlain(username, password string) SASLType { - // TODO: how widely used is hostname? should it be supported - return func(c *Conn) error { - // make handlers map if no other mechanism has - if c.saslHandlers == nil { - c.saslHandlers = make(map[encoding.Symbol]stateFunc) - } - - // add the handler the the map - c.saslHandlers[saslMechanismPLAIN] = func(ctx context.Context) (stateFunc, error) { - // send saslInit with PLAIN payload - init := &frames.SASLInit{ - Mechanism: "PLAIN", - InitialResponse: []byte("\x00" + username + "\x00" + password), - Hostname: "", - } - fr := frames.Frame{ - Type: frames.TypeSASL, - Body: init, - } - debug.Log(1, "TX (ConnSASLPlain %p): %s", c, fr) - timeout, err := c.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - if err = c.writeFrame(timeout, fr); err != nil { - return nil, err - } - - // go to c.saslOutcome to handle the server response - return c.saslOutcome, nil - } - return nil - } -} - -// ConnSASLAnonymous enables SASL ANONYMOUS authentication for the connection. 
-func SASLTypeAnonymous() SASLType { - return func(c *Conn) error { - // make handlers map if no other mechanism has - if c.saslHandlers == nil { - c.saslHandlers = make(map[encoding.Symbol]stateFunc) - } - - // add the handler the the map - c.saslHandlers[saslMechanismANONYMOUS] = func(ctx context.Context) (stateFunc, error) { - init := &frames.SASLInit{ - Mechanism: saslMechanismANONYMOUS, - InitialResponse: []byte("anonymous"), - } - fr := frames.Frame{ - Type: frames.TypeSASL, - Body: init, - } - debug.Log(1, "TX (ConnSASLAnonymous %p): %s", c, fr) - timeout, err := c.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - if err = c.writeFrame(timeout, fr); err != nil { - return nil, err - } - - // go to c.saslOutcome to handle the server response - return c.saslOutcome, nil - } - return nil - } -} - -// ConnSASLExternal enables SASL EXTERNAL authentication for the connection. -// The value for resp is dependent on the type of authentication (empty string is common for TLS). -// See https://datatracker.ietf.org/doc/html/rfc4422#appendix-A for additional info. -func SASLTypeExternal(resp string) SASLType { - return func(c *Conn) error { - // make handlers map if no other mechanism has - if c.saslHandlers == nil { - c.saslHandlers = make(map[encoding.Symbol]stateFunc) - } - - // add the handler the the map - c.saslHandlers[saslMechanismEXTERNAL] = func(ctx context.Context) (stateFunc, error) { - init := &frames.SASLInit{ - Mechanism: saslMechanismEXTERNAL, - InitialResponse: []byte(resp), - } - fr := frames.Frame{ - Type: frames.TypeSASL, - Body: init, - } - debug.Log(1, "TX (ConnSASLExternal %p): %s", c, fr) - timeout, err := c.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - if err = c.writeFrame(timeout, fr); err != nil { - return nil, err - } - - // go to c.saslOutcome to handle the server response - return c.saslOutcome, nil - } - return nil - } -} - -// ConnSASLXOAUTH2 enables SASL XOAUTH2 authentication for the connection. 
-// -// The saslMaxFrameSizeOverride parameter allows the limit that governs the maximum frame size this client will allow -// itself to generate to be raised for the sasl-init frame only. Set this when the size of the size of the SASL XOAUTH2 -// initial client response (which contains the username and bearer token) would otherwise breach the 512 byte min-max-frame-size -// (http://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-transport-v1.0-os.html#definition-MIN-MAX-FRAME-SIZE). Pass -1 -// to keep the default. -// -// SASL XOAUTH2 transmits the bearer in plain text and should only be used -// on TLS/SSL enabled connection. -func SASLTypeXOAUTH2(username, bearer string, saslMaxFrameSizeOverride uint32) SASLType { - return func(c *Conn) error { - // make handlers map if no other mechanism has - if c.saslHandlers == nil { - c.saslHandlers = make(map[encoding.Symbol]stateFunc) - } - - response, err := saslXOAUTH2InitialResponse(username, bearer) - if err != nil { - return err - } - - handler := saslXOAUTH2Handler{ - conn: c, - maxFrameSizeOverride: saslMaxFrameSizeOverride, - response: response, - } - // add the handler the the map - c.saslHandlers[saslMechanismXOAUTH2] = handler.init - return nil - } -} - -type saslXOAUTH2Handler struct { - conn *Conn - maxFrameSizeOverride uint32 - response []byte - errorResponse []byte // https://developers.google.com/gmail/imap/xoauth2-protocol#error_response -} - -func (s saslXOAUTH2Handler) init(ctx context.Context) (stateFunc, error) { - originalPeerMaxFrameSize := s.conn.peerMaxFrameSize - if s.maxFrameSizeOverride > s.conn.peerMaxFrameSize { - s.conn.peerMaxFrameSize = s.maxFrameSizeOverride - } - timeout, err := s.conn.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - err = s.conn.writeFrame(timeout, frames.Frame{ - Type: frames.TypeSASL, - Body: &frames.SASLInit{ - Mechanism: saslMechanismXOAUTH2, - InitialResponse: s.response, - }, - }) - s.conn.peerMaxFrameSize = originalPeerMaxFrameSize - if err != nil 
{ - return nil, err - } - - return s.step, nil -} - -func (s saslXOAUTH2Handler) step(ctx context.Context) (stateFunc, error) { - // read challenge or outcome frame - fr, err := s.conn.readFrame() - if err != nil { - return nil, err - } - - switch v := fr.Body.(type) { - case *frames.SASLOutcome: - // check if auth succeeded - if v.Code != encoding.CodeSASLOK { - return nil, fmt.Errorf("SASL XOAUTH2 auth failed with code %#00x: %s : %s", - v.Code, v.AdditionalData, s.errorResponse) - } - - // return to c.negotiateProto - s.conn.saslComplete = true - return s.conn.negotiateProto, nil - case *frames.SASLChallenge: - if s.errorResponse == nil { - s.errorResponse = v.Challenge - - timeout, err := s.conn.getWriteTimeout(ctx) - if err != nil { - return nil, err - } - - // The SASL protocol requires clients to send an empty response to this challenge. - err = s.conn.writeFrame(timeout, frames.Frame{ - Type: frames.TypeSASL, - Body: &frames.SASLResponse{ - Response: []byte{}, - }, - }) - if err != nil { - return nil, err - } - return s.step, nil - } else { - return nil, fmt.Errorf("SASL XOAUTH2 unexpected additional error response received during "+ - "exchange. 
Initial error response: %s, additional response: %s", s.errorResponse, v.Challenge) - } - default: - return nil, fmt.Errorf("sasl: unexpected frame type %T", fr.Body) - } -} - -func saslXOAUTH2InitialResponse(username string, bearer string) ([]byte, error) { - if len(bearer) == 0 { - return []byte{}, fmt.Errorf("unacceptable bearer token") - } - for _, char := range bearer { - if char < '\x20' || char > '\x7E' { - return []byte{}, fmt.Errorf("unacceptable bearer token") - } - } - for _, char := range username { - if char == '\x01' { - return []byte{}, fmt.Errorf("unacceptable username") - } - } - return []byte("user=" + username + "\x01auth=Bearer " + bearer + "\x01\x01"), nil -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/sender.go b/sdk/messaging/azservicebus/internal/go-amqp/sender.go deleted file mode 100644 index afe17e7f8e1c..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/sender.go +++ /dev/null @@ -1,482 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/buffer" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/frames" -) - -// Sender sends messages on a single AMQP link. -type Sender struct { - l link - transfers chan transferEnvelope // sender uses to send transfer frames - - mu sync.Mutex // protects buf and nextDeliveryTag - buf buffer.Buffer - nextDeliveryTag uint64 -} - -// LinkName() is the name of the link used for this Sender. 
-func (s *Sender) LinkName() string { - return s.l.key.name -} - -// MaxMessageSize is the maximum size of a single message. -func (s *Sender) MaxMessageSize() uint64 { - return s.l.maxMessageSize -} - -// SendOptions contains any optional values for the Sender.Send method. -type SendOptions struct { - // Indicates the message is to be sent as settled when settlement mode is SenderSettleModeMixed. - // If the settlement mode is SenderSettleModeUnsettled and Settled is true, an error is returned. - Settled bool -} - -// Send sends a Message. -// -// Blocks until the message is sent or an error occurs. If the peer is -// configured for receiver settlement mode second, the call also blocks -// until the peer confirms message settlement. -// -// - ctx controls waiting for the message to be sent and possibly confirmed -// - msg is the message to send -// - opts contains optional values, pass nil to accept the defaults -// -// If the context's deadline expires or is cancelled before the operation -// completes, the message is in an unknown state of transmission. -// -// Send is safe for concurrent use. Since only a single message can be -// sent on a link at a time, this is most useful when settlement confirmation -// has been requested (receiver settle mode is second). In this case, -// additional messages can be sent while the current goroutine is waiting -// for the confirmation. -func (s *Sender) Send(ctx context.Context, msg *Message, opts *SendOptions) error { - // check if the link is dead. while it's safe to call s.send - // in this case, this will avoid some allocations etc. 
- select { - case <-s.l.done: - return s.l.doneErr - default: - // link is still active - } - done, err := s.send(ctx, msg, opts) - if err != nil { - return err - } - - // wait for transfer to be confirmed - select { - case state := <-done: - if state, ok := state.(*encoding.StateRejected); ok { - if state.Error != nil { - return state.Error - } - return errors.New("the peer rejected the message without specifying an error") - } - return nil - case <-s.l.done: - return s.l.doneErr - case <-ctx.Done(): - // TODO: if the message is not settled and we never received a disposition, how can we consider the message as sent? - return ctx.Err() - } -} - -// send is separated from Send so that the mutex unlock can be deferred without -// locking the transfer confirmation that happens in Send. -func (s *Sender) send(ctx context.Context, msg *Message, opts *SendOptions) (chan encoding.DeliveryState, error) { - const ( - maxDeliveryTagLength = 32 - maxTransferFrameHeader = 66 // determined by calcMaxTransferFrameHeader - ) - if len(msg.DeliveryTag) > maxDeliveryTagLength { - return nil, &Error{ - Condition: ErrCondMessageSizeExceeded, - Description: fmt.Sprintf("delivery tag is over the allowed %v bytes, len: %v", maxDeliveryTagLength, len(msg.DeliveryTag)), - } - } - - s.mu.Lock() - defer s.mu.Unlock() - - s.buf.Reset() - err := msg.Marshal(&s.buf) - if err != nil { - return nil, err - } - - if s.l.maxMessageSize != 0 && uint64(s.buf.Len()) > s.l.maxMessageSize { - return nil, &Error{ - Condition: ErrCondMessageSizeExceeded, - Description: fmt.Sprintf("encoded message size exceeds max of %d", s.l.maxMessageSize), - } - } - - senderSettled := senderSettleModeValue(s.l.senderSettleMode) == SenderSettleModeSettled - if opts != nil { - if opts.Settled && senderSettleModeValue(s.l.senderSettleMode) == SenderSettleModeUnsettled { - return nil, errors.New("can't send message as settled when sender settlement mode is unsettled") - } else if opts.Settled { - senderSettled = true - } - 
} - - var ( - maxPayloadSize = int64(s.l.session.conn.peerMaxFrameSize) - maxTransferFrameHeader - ) - - deliveryTag := msg.DeliveryTag - if len(deliveryTag) == 0 { - // use uint64 encoded as []byte as deliveryTag - deliveryTag = make([]byte, 8) - binary.BigEndian.PutUint64(deliveryTag, s.nextDeliveryTag) - s.nextDeliveryTag++ - } - - fr := frames.PerformTransfer{ - Handle: s.l.handle, - DeliveryID: &needsDeliveryID, - DeliveryTag: deliveryTag, - MessageFormat: &msg.Format, - More: s.buf.Len() > 0, - } - - for fr.More { - buf, _ := s.buf.Next(maxPayloadSize) - fr.Payload = append([]byte(nil), buf...) - fr.More = s.buf.Len() > 0 - if !fr.More { - // SSM=settled: overrides RSM; no acks. - // SSM=unsettled: sender should wait for receiver to ack - // RSM=first: receiver considers it settled immediately, but must still send ack (SSM=unsettled only) - // RSM=second: receiver sends ack and waits for return ack from sender (SSM=unsettled only) - - // mark final transfer as settled when sender mode is settled - fr.Settled = senderSettled - - // set done on last frame - fr.Done = make(chan encoding.DeliveryState, 1) - } - - // NOTE: we MUST send a copy of fr here since we modify it post send - - sent := make(chan error, 1) - select { - case s.transfers <- transferEnvelope{Ctx: ctx, Frame: fr, Sent: sent}: - // frame was sent to our mux - case <-s.l.done: - return nil, s.l.doneErr - case <-ctx.Done(): - return nil, &Error{Condition: ErrCondTransferLimitExceeded, Description: fmt.Sprintf("credit limit exceeded for sending link %s", s.l.key.name)} - } - - select { - case err := <-sent: - if err != nil { - return nil, err - } - case <-s.l.done: - return nil, s.l.doneErr - } - - // clear values that are only required on first message - fr.DeliveryID = nil - fr.DeliveryTag = nil - fr.MessageFormat = nil - } - - return fr.Done, nil -} - -// Address returns the link's address. 
-func (s *Sender) Address() string { - if s.l.target == nil { - return "" - } - return s.l.target.Address -} - -// Close closes the Sender and AMQP link. -// - ctx controls waiting for the peer to acknowledge the close -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. However, the operation will continue to -// execute in the background. Subsequent calls will return a *LinkError -// that contains the context's error message. -func (s *Sender) Close(ctx context.Context) error { - return s.l.closeLink(ctx) -} - -// newSendingLink creates a new sending link and attaches it to the session -func newSender(target string, session *Session, opts *SenderOptions) (*Sender, error) { - l := newLink(session, encoding.RoleSender) - l.target = &frames.Target{Address: target} - l.source = new(frames.Source) - s := &Sender{ - l: l, - } - - if opts == nil { - return s, nil - } - - for _, v := range opts.Capabilities { - s.l.source.Capabilities = append(s.l.source.Capabilities, encoding.Symbol(v)) - } - if opts.Durability > DurabilityUnsettledState { - return nil, fmt.Errorf("invalid Durability %d", opts.Durability) - } - s.l.source.Durable = opts.Durability - if opts.DynamicAddress { - s.l.target.Address = "" - s.l.dynamicAddr = opts.DynamicAddress - } - if opts.ExpiryPolicy != "" { - if err := encoding.ValidateExpiryPolicy(opts.ExpiryPolicy); err != nil { - return nil, err - } - s.l.source.ExpiryPolicy = opts.ExpiryPolicy - } - s.l.source.Timeout = opts.ExpiryTimeout - if opts.Name != "" { - s.l.key.name = opts.Name - } - if opts.Properties != nil { - s.l.properties = make(map[encoding.Symbol]any) - for k, v := range opts.Properties { - if k == "" { - return nil, errors.New("link property key must not be empty") - } - s.l.properties[encoding.Symbol(k)] = v - } - } - if opts.RequestedReceiverSettleMode != nil { - if rsm := *opts.RequestedReceiverSettleMode; rsm > ReceiverSettleModeSecond { - return nil, 
fmt.Errorf("invalid RequestedReceiverSettleMode %d", rsm) - } - s.l.receiverSettleMode = opts.RequestedReceiverSettleMode - } - if opts.SettlementMode != nil { - if ssm := *opts.SettlementMode; ssm > SenderSettleModeMixed { - return nil, fmt.Errorf("invalid SettlementMode %d", ssm) - } - s.l.senderSettleMode = opts.SettlementMode - } - s.l.source.Address = opts.SourceAddress - for _, v := range opts.TargetCapabilities { - s.l.target.Capabilities = append(s.l.target.Capabilities, encoding.Symbol(v)) - } - if opts.TargetDurability != DurabilityNone { - s.l.target.Durable = opts.TargetDurability - } - if opts.TargetExpiryPolicy != ExpiryPolicySessionEnd { - s.l.target.ExpiryPolicy = opts.TargetExpiryPolicy - } - if opts.TargetExpiryTimeout != 0 { - s.l.target.Timeout = opts.TargetExpiryTimeout - } - return s, nil -} - -func (s *Sender) attach(ctx context.Context) error { - if err := s.l.attach(ctx, func(pa *frames.PerformAttach) { - pa.Role = encoding.RoleSender - if pa.Target == nil { - pa.Target = new(frames.Target) - } - pa.Target.Dynamic = s.l.dynamicAddr - }, func(pa *frames.PerformAttach) { - if s.l.target == nil { - s.l.target = new(frames.Target) - } - - // if dynamic address requested, copy assigned name to address - if s.l.dynamicAddr && pa.Target != nil { - s.l.target.Address = pa.Target.Address - } - }); err != nil { - return err - } - - s.transfers = make(chan transferEnvelope) - - return nil -} - -type senderTestHooks struct { - MuxTransfer func() -} - -func (s *Sender) mux(hooks senderTestHooks) { - if hooks.MuxTransfer == nil { - hooks.MuxTransfer = nop - } - - defer func() { - close(s.l.done) - }() - -Loop: - for { - var outgoingTransfers chan transferEnvelope - if s.l.linkCredit > 0 { - debug.Log(1, "TX (Sender %p) (enable): target: %q, link credit: %d, deliveryCount: %d", s, s.l.target.Address, s.l.linkCredit, s.l.deliveryCount) - outgoingTransfers = s.transfers - } else { - debug.Log(1, "TX (Sender %p) (pause): target: %q, link credit: %d, 
deliveryCount: %d", s, s.l.target.Address, s.l.linkCredit, s.l.deliveryCount) - } - - closed := s.l.close - if s.l.closeInProgress { - // swap out channel so it no longer triggers - closed = nil - - // disable sending once closing is in progress. - // this prevents races with mux shutdown and - // the peer sending disposition frames. - outgoingTransfers = nil - } - - select { - // received frame - case q := <-s.l.rxQ.Wait(): - // populated queue - fr := *q.Dequeue() - s.l.rxQ.Release(q) - - // if muxHandleFrame returns an error it means the mux must terminate. - // note that in the case of a client-side close due to an error, nil - // is returned in order to keep the mux running to ack the detach frame. - if err := s.muxHandleFrame(fr); err != nil { - s.l.doneErr = err - return - } - - // send data - case env := <-outgoingTransfers: - hooks.MuxTransfer() - select { - case s.l.session.txTransfer <- env: - debug.Log(2, "TX (Sender %p): mux transfer to Session: %d, %s", s, s.l.session.channel, env.Frame) - // decrement link-credit after entire message transferred - if !env.Frame.More { - s.l.deliveryCount++ - s.l.linkCredit-- - // we are the sender and we keep track of the peer's link credit - debug.Log(3, "TX (Sender %p): link: %s, link credit: %d", s, s.l.key.name, s.l.linkCredit) - } - continue Loop - case <-s.l.close: - continue Loop - case <-s.l.session.done: - continue Loop - } - - case <-closed: - if s.l.closeInProgress { - // a client-side close due to protocol error is in progress - continue - } - - // sender is being closed by the client - s.l.closeInProgress = true - fr := &frames.PerformDetach{ - Handle: s.l.handle, - Closed: true, - } - s.l.txFrame(context.Background(), fr, nil) - - case <-s.l.session.done: - s.l.doneErr = s.l.session.doneErr - return - } - } -} - -// muxHandleFrame processes fr based on type. 
-// depending on the peer's RSM, it might return a disposition frame for sending -func (s *Sender) muxHandleFrame(fr frames.FrameBody) error { - debug.Log(2, "RX (Sender %p): %s", s, fr) - switch fr := fr.(type) { - // flow control frame - case *frames.PerformFlow: - // the sender's link-credit variable MUST be set according to this formula when flow information is given by the receiver: - // link-credit(snd) := delivery-count(rcv) + link-credit(rcv) - delivery-count(snd) - linkCredit := *fr.LinkCredit - s.l.deliveryCount - if fr.DeliveryCount != nil { - // DeliveryCount can be nil if the receiver hasn't processed - // the attach. That shouldn't be the case here, but it's - // what ActiveMQ does. - linkCredit += *fr.DeliveryCount - } - - s.l.linkCredit = linkCredit - - if !fr.Echo { - return nil - } - - var ( - // copy because sent by pointer below; prevent race - deliveryCount = s.l.deliveryCount - ) - - // send flow - resp := &frames.PerformFlow{ - Handle: &s.l.handle, - DeliveryCount: &deliveryCount, - LinkCredit: &linkCredit, // max number of messages - } - - select { - case s.l.session.tx <- frameBodyEnvelope{Ctx: context.Background(), FrameBody: resp}: - debug.Log(2, "TX (Sender %p): mux frame to Session (%p): %d, %s", s, s.l.session, s.l.session.channel, resp) - case <-s.l.close: - return nil - case <-s.l.session.done: - return s.l.session.doneErr - } - - case *frames.PerformDisposition: - if fr.Settled { - return nil - } - - // peer is in mode second, so we must send confirmation of disposition. - // NOTE: the ack must be sent through the session so it can close out - // the in-flight disposition. 
- dr := &frames.PerformDisposition{ - Role: encoding.RoleSender, - First: fr.First, - Last: fr.Last, - Settled: true, - } - - select { - case s.l.session.tx <- frameBodyEnvelope{Ctx: context.Background(), FrameBody: dr}: - debug.Log(2, "TX (Sender %p): mux frame to Session (%p): %d, %s", s, s.l.session, s.l.session.channel, dr) - case <-s.l.close: - return nil - case <-s.l.session.done: - return s.l.session.doneErr - } - - return nil - - default: - return s.l.muxHandleFrame(fr) - } - - return nil -} diff --git a/sdk/messaging/azservicebus/internal/go-amqp/session.go b/sdk/messaging/azservicebus/internal/go-amqp/session.go deleted file mode 100644 index d9087be92e80..000000000000 --- a/sdk/messaging/azservicebus/internal/go-amqp/session.go +++ /dev/null @@ -1,792 +0,0 @@ -// Copyright (C) 2017 Kale Blankenship -// Portions Copyright (c) Microsoft Corporation - -package amqp - -import ( - "context" - "errors" - "fmt" - "math" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/bitmap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/debug" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/encoding" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/frames" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/internal/queue" -) - -// Default session options -const ( - defaultWindow = 5000 -) - -// SessionOptions contains the optional settings for configuring an AMQP session. -type SessionOptions struct { - // MaxLinks sets the maximum number of links (Senders/Receivers) - // allowed on the session. - // - // Minimum: 1. - // Default: 4294967295. - MaxLinks uint32 -} - -// Session is an AMQP session. -// -// A session multiplexes Receivers. 
-type Session struct { - channel uint16 // session's local channel - remoteChannel uint16 // session's remote channel, owned by conn.connReader - conn *Conn // underlying conn - tx chan frameBodyEnvelope // non-transfer frames to be sent; session must track disposition - txTransfer chan transferEnvelope // transfer frames to be sent; session must track disposition - - // frames destined for this session are added to this queue by conn.connReader - rxQ *queue.Holder[frames.FrameBody] - - // flow control - incomingWindow uint32 - outgoingWindow uint32 - needFlowCount uint32 - - handleMax uint32 - - // link management - linksMu sync.RWMutex // used to synchronize link handle allocation - linksByKey map[linkKey]*link // mapping of name+role link - handles *bitmap.Bitmap // allocated handles - - abandonedLinksMu sync.Mutex - abandonedLinks []*link - - // used for gracefully closing session - close chan struct{} // closed by calling Close(). it signals that the end performative should be sent - closeOnce sync.Once - - // part of internal public surface area - done chan struct{} // closed when the session has terminated (mux exited); DO NOT wait on this from within Session.mux() as it will never trigger! - endSent chan struct{} // closed when the end performative has been sent; once this is closed, links MUST NOT send any frames! - doneErr error // contains the mux error state; ONLY written to by the mux and MUST only be read from after done is closed! - closeErr error // contains the error state returned from Close(); ONLY Close() reads/writes this! 
-} - -func newSession(c *Conn, channel uint16, opts *SessionOptions) *Session { - s := &Session{ - conn: c, - channel: channel, - tx: make(chan frameBodyEnvelope), - txTransfer: make(chan transferEnvelope), - incomingWindow: defaultWindow, - outgoingWindow: defaultWindow, - handleMax: math.MaxUint32 - 1, - linksMu: sync.RWMutex{}, - linksByKey: make(map[linkKey]*link), - close: make(chan struct{}), - done: make(chan struct{}), - endSent: make(chan struct{}), - } - - if opts != nil { - if opts.MaxLinks != 0 { - // MaxLinks is the number of total links. - // handleMax is the max handle ID which starts - // at zero. so we decrement by one - s.handleMax = opts.MaxLinks - 1 - } - } - - // create handle map after options have been applied - s.handles = bitmap.New(s.handleMax) - - s.rxQ = queue.NewHolder(queue.New[frames.FrameBody](int(s.incomingWindow))) - - return s -} - -// waitForFrame waits for an incoming frame to be queued. -// it returns the next frame from the queue, or an error. -// the error is either from the context or conn.doneErr. -// not meant for consumption outside of session.go. -func (s *Session) waitForFrame(ctx context.Context) (frames.FrameBody, error) { - var q *queue.Queue[frames.FrameBody] - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-s.conn.done: - return nil, s.conn.doneErr - case q = <-s.rxQ.Wait(): - // populated queue - } - - fr := q.Dequeue() - s.rxQ.Release(q) - - return *fr, nil -} - -func (s *Session) begin(ctx context.Context) error { - // send Begin to server - begin := &frames.PerformBegin{ - NextOutgoingID: 0, - IncomingWindow: s.incomingWindow, - OutgoingWindow: s.outgoingWindow, - HandleMax: s.handleMax, - } - - if err := s.txFrameAndWait(ctx, begin); err != nil { - return err - } - - // wait for response - fr, err := s.waitForFrame(ctx) - if err != nil { - // if we exit before receiving the ack, our caller will clean up the channel. 
- // however, it does mean that the peer will now have assigned an outgoing - // channel ID that's not in use. - return err - } - - begin, ok := fr.(*frames.PerformBegin) - if !ok { - // this codepath is hard to hit (impossible?). if the response isn't a PerformBegin and we've not - // yet seen the remote channel number, the default clause in conn.connReader will protect us from that. - // if we have seen the remote channel number then it's likely the session.mux for that channel will - // either swallow the frame or blow up in some other way, both causing this call to hang. - // deallocate session on error. we can't call - // s.Close() as the session mux hasn't started yet. - debug.Log(1, "RX (Session %p): unexpected begin response frame %T", s, fr) - s.conn.deleteSession(s) - if err := s.conn.Close(); err != nil { - return err - } - return &ConnError{inner: fmt.Errorf("unexpected begin response: %#v", fr)} - } - - // start Session multiplexor - go s.mux(begin) - - return nil -} - -// Close closes the session. -// - ctx controls waiting for the peer to acknowledge the session is closed -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. However, the operation will continue to -// execute in the background. Subsequent calls will return a *SessionError -// that contains the context's error message. -func (s *Session) Close(ctx context.Context) error { - var ctxErr error - s.closeOnce.Do(func() { - close(s.close) - - // once the mux has received the ack'ing end performative, the mux will - // exit which deletes the session and closes s.done. - select { - case <-s.done: - s.closeErr = s.doneErr - - case <-ctx.Done(): - // notify the caller that the close timed out/was cancelled. - // the mux will remain running and once the ack is received it will terminate. - ctxErr = ctx.Err() - - // record that the close timed out/was cancelled. 
- // subsequent calls to Close() will return this - debug.Log(1, "TX (Session %p) channel %d: %v", s, s.channel, ctxErr) - s.closeErr = &SessionError{inner: ctxErr} - } - }) - - if ctxErr != nil { - return ctxErr - } - - var sessionErr *SessionError - if errors.As(s.closeErr, &sessionErr) && sessionErr.RemoteErr == nil && sessionErr.inner == nil { - // an empty SessionError means the session was cleanly closed by the caller - return nil - } - return s.closeErr -} - -// txFrame sends a frame to the connWriter. -// - ctx is used to provide the write deadline -// - fr is the frame to write to net.Conn -// - sent is the optional channel that will contain the error if the write fails -func (s *Session) txFrame(ctx context.Context, fr frames.FrameBody, sent chan error) { - debug.Log(2, "TX (Session %p) mux frame to Conn (%p): %s", s, s.conn, fr) - s.conn.sendFrame(ctx, frames.Frame{ - Type: frames.TypeAMQP, - Channel: s.channel, - Body: fr, - }, sent) -} - -// txFrameAndWait sends a frame to the connWriter and waits for the write to complete -// - ctx is used to provide the write deadline -// - fr is the frame to write to net.Conn -func (s *Session) txFrameAndWait(ctx context.Context, fr frames.FrameBody) error { - sent := make(chan error, 1) - s.txFrame(ctx, fr, sent) - - select { - case err := <-sent: - return err - case <-s.conn.done: - return s.conn.doneErr - case <-s.done: - return s.doneErr - } -} - -// NewReceiver opens a new receiver link on the session. -// - ctx controls waiting for the peer to create a sending terminus -// - source is the name of the peer's sending terminus -// - opts contains optional values, pass nil to accept the defaults -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. If the Receiver was successfully -// created, it will be cleaned up in future calls to NewReceiver. 
-func (s *Session) NewReceiver(ctx context.Context, source string, opts *ReceiverOptions) (*Receiver, error) { - r, err := newReceiver(source, s, opts) - if err != nil { - return nil, err - } - if err = r.attach(ctx); err != nil { - return nil, err - } - - go r.mux(receiverTestHooks{}) - - return r, nil -} - -// NewSender opens a new sender link on the session. -// - ctx controls waiting for the peer to create a receiver terminus -// - target is the name of the peer's receiver terminus -// - opts contains optional values, pass nil to accept the defaults -// -// If the context's deadline expires or is cancelled before the operation -// completes, an error is returned. If the Sender was successfully -// created, it will be cleaned up in future calls to NewSender. -func (s *Session) NewSender(ctx context.Context, target string, opts *SenderOptions) (*Sender, error) { - l, err := newSender(target, s, opts) - if err != nil { - return nil, err - } - if err = l.attach(ctx); err != nil { - return nil, err - } - - go l.mux(senderTestHooks{}) - - return l, nil -} - -func (s *Session) mux(remoteBegin *frames.PerformBegin) { - defer func() { - if s.doneErr == nil { - s.doneErr = &SessionError{} - } else if connErr := (&ConnError{}); !errors.As(s.doneErr, &connErr) { - // only wrap non-ConnError error types - var amqpErr *Error - if errors.As(s.doneErr, &amqpErr) { - s.doneErr = &SessionError{RemoteErr: amqpErr} - } else { - s.doneErr = &SessionError{inner: s.doneErr} - } - } - // Signal goroutines waiting on the session. 
- close(s.done) - }() - - var ( - links = make(map[uint32]*link) // mapping of remote handles to links - handlesByDeliveryID = make(map[uint32]uint32) // mapping of deliveryIDs to handles - deliveryIDByHandle = make(map[uint32]uint32) // mapping of handles to latest deliveryID - handlesByRemoteDeliveryID = make(map[uint32]uint32) // mapping of remote deliveryID to handles - - settlementByDeliveryID = make(map[uint32]chan encoding.DeliveryState) - - nextDeliveryID uint32 // tracks the next delivery ID for outgoing transfers - - // flow control values - nextOutgoingID uint32 - nextIncomingID = remoteBegin.NextOutgoingID - remoteIncomingWindow = remoteBegin.IncomingWindow - remoteOutgoingWindow = remoteBegin.OutgoingWindow - - closeInProgress bool // indicates the end performative has been sent - ) - - closeWithError := func(e1 *Error, e2 error) { - if closeInProgress { - debug.Log(3, "TX (Session %p): close already pending, discarding %v", s, e1) - return - } - - closeInProgress = true - s.doneErr = e2 - s.txFrame(context.Background(), &frames.PerformEnd{Error: e1}, nil) - close(s.endSent) - } - - for { - txTransfer := s.txTransfer - // disable txTransfer if flow control windows have been exceeded - if remoteIncomingWindow == 0 || s.outgoingWindow == 0 { - debug.Log(1, "TX (Session %p): disabling txTransfer - window exceeded. remoteIncomingWindow: %d outgoingWindow: %d", - s, remoteIncomingWindow, s.outgoingWindow) - txTransfer = nil - } - - tx := s.tx - closed := s.close - if closeInProgress { - // swap out channel so it no longer triggers - closed = nil - - // once the end performative is sent, we're not allowed to send any frames - tx = nil - txTransfer = nil - } - - // notes on client-side closing session - // when session is closed, we must keep the mux running until the ack'ing end performative - // has been received. during this window, the session is allowed to receive frames but cannot - // send them. 
- // client-side close happens either by user calling Session.Close() or due to mux initiated - // close due to a violation of some invariant (see sending &Error{} to s.close). in the case - // that both code paths have been triggered, we must be careful to preserve the error that - // triggered the mux initiated close so it can be surfaced to the caller. - - select { - // conn has completed, exit - case <-s.conn.done: - s.doneErr = s.conn.doneErr - return - - case <-closed: - if closeInProgress { - // a client-side close due to protocol error is in progress - continue - } - // session is being closed by the client - closeInProgress = true - s.txFrame(context.Background(), &frames.PerformEnd{}, nil) - close(s.endSent) - - // incoming frame - case q := <-s.rxQ.Wait(): - fr := *q.Dequeue() - s.rxQ.Release(q) - debug.Log(2, "RX (Session %p): %s", s, fr) - - switch body := fr.(type) { - // Disposition frames can reference transfers from more than one - // link. Send this frame to all of them. 
- case *frames.PerformDisposition: - start := body.First - end := start - if body.Last != nil { - end = *body.Last - } - for deliveryID := start; deliveryID <= end; deliveryID++ { - handles := handlesByDeliveryID - if body.Role == encoding.RoleSender { - handles = handlesByRemoteDeliveryID - } - - handle, ok := handles[deliveryID] - if !ok { - debug.Log(2, "RX (Session %p): role %s: didn't find deliveryID %d in handles map", s, body.Role, deliveryID) - continue - } - delete(handles, deliveryID) - - if body.Settled && body.Role == encoding.RoleReceiver { - // check if settlement confirmation was requested, if so - // confirm by closing channel - if done, ok := settlementByDeliveryID[deliveryID]; ok { - delete(settlementByDeliveryID, deliveryID) - select { - case done <- body.State: - default: - } - close(done) - } - } - - link, ok := links[handle] - if !ok { - closeWithError(&Error{ - Condition: ErrCondUnattachedHandle, - Description: "received disposition frame referencing a handle that's not in use", - }, fmt.Errorf("received disposition frame with unknown link handle %d", handle)) - continue - } - - s.muxFrameToLink(link, fr) - } - continue - case *frames.PerformFlow: - if body.NextIncomingID == nil { - // This is a protocol error: - // "[...] MUST be set if the peer has received - // the begin frame for the session" - closeWithError(&Error{ - Condition: ErrCondNotAllowed, - Description: "next-incoming-id not set after session established", - }, errors.New("protocol error: received flow without next-incoming-id after session established")) - continue - } - - // "When the endpoint receives a flow frame from its peer, - // it MUST update the next-incoming-id directly from the - // next-outgoing-id of the frame, and it MUST update the - // remote-outgoing-window directly from the outgoing-window - // of the frame." 
- nextIncomingID = body.NextOutgoingID - remoteOutgoingWindow = body.OutgoingWindow - - // "The remote-incoming-window is computed as follows: - // - // next-incoming-id(flow) + incoming-window(flow) - next-outgoing-id(endpoint) - // - // If the next-incoming-id field of the flow frame is not set, then remote-incoming-window is computed as follows: - // - // initial-outgoing-id(endpoint) + incoming-window(flow) - next-outgoing-id(endpoint)" - remoteIncomingWindow = body.IncomingWindow - nextOutgoingID - remoteIncomingWindow += *body.NextIncomingID - debug.Log(3, "RX (Session %p): flow - remoteOutgoingWindow: %d remoteIncomingWindow: %d nextOutgoingID: %d", s, remoteOutgoingWindow, remoteIncomingWindow, nextOutgoingID) - - // Send to link if handle is set - if body.Handle != nil { - link, ok := links[*body.Handle] - if !ok { - closeWithError(&Error{ - Condition: ErrCondUnattachedHandle, - Description: "received flow frame referencing a handle that's not in use", - }, fmt.Errorf("received flow frame with unknown link handle %d", body.Handle)) - continue - } - - s.muxFrameToLink(link, fr) - continue - } - - if body.Echo && !closeInProgress { - niID := nextIncomingID - resp := &frames.PerformFlow{ - NextIncomingID: &niID, - IncomingWindow: s.incomingWindow, - NextOutgoingID: nextOutgoingID, - OutgoingWindow: s.outgoingWindow, - } - s.txFrame(context.Background(), resp, nil) - } - - case *frames.PerformAttach: - // On Attach response link should be looked up by name, then added - // to the links map with the remote's handle contained in this - // attach frame. - // - // Note body.Role is the remote peer's role, we reverse for the local key. 
- s.linksMu.RLock() - link, linkOk := s.linksByKey[linkKey{name: body.Name, role: !body.Role}] - s.linksMu.RUnlock() - if !linkOk { - closeWithError(&Error{ - Condition: ErrCondNotAllowed, - Description: "received mismatched attach frame", - }, fmt.Errorf("protocol error: received mismatched attach frame %+v", body)) - continue - } - - link.remoteHandle = body.Handle - links[link.remoteHandle] = link - - s.muxFrameToLink(link, fr) - - case *frames.PerformTransfer: - s.needFlowCount++ - // "Upon receiving a transfer, the receiving endpoint will - // increment the next-incoming-id to match the implicit - // transfer-id of the incoming transfer plus one, as well - // as decrementing the remote-outgoing-window, and MAY - // (depending on policy) decrement its incoming-window." - nextIncomingID++ - // don't loop to intmax - if remoteOutgoingWindow > 0 { - remoteOutgoingWindow-- - } - link, ok := links[body.Handle] - if !ok { - closeWithError(&Error{ - Condition: ErrCondUnattachedHandle, - Description: "received transfer frame referencing a handle that's not in use", - }, fmt.Errorf("received transfer frame with unknown link handle %d", body.Handle)) - continue - } - - s.muxFrameToLink(link, fr) - - // if this message is received unsettled and link rcv-settle-mode == second, add to handlesByRemoteDeliveryID - if !body.Settled && body.DeliveryID != nil && link.receiverSettleMode != nil && *link.receiverSettleMode == ReceiverSettleModeSecond { - debug.Log(1, "RX (Session %p): adding handle to handlesByRemoteDeliveryID. delivery ID: %d", s, *body.DeliveryID) - handlesByRemoteDeliveryID[*body.DeliveryID] = body.Handle - } - - // Update peer's outgoing window if half has been consumed. 
- if s.needFlowCount >= s.incomingWindow/2 && !closeInProgress { - debug.Log(3, "RX (Session %p): channel %d: flow - s.needFlowCount(%d) >= s.incomingWindow(%d)/2\n", s, s.channel, s.needFlowCount, s.incomingWindow) - s.needFlowCount = 0 - nID := nextIncomingID - flow := &frames.PerformFlow{ - NextIncomingID: &nID, - IncomingWindow: s.incomingWindow, - NextOutgoingID: nextOutgoingID, - OutgoingWindow: s.outgoingWindow, - } - s.txFrame(context.Background(), flow, nil) - } - - case *frames.PerformDetach: - link, ok := links[body.Handle] - if !ok { - closeWithError(&Error{ - Condition: ErrCondUnattachedHandle, - Description: "received detach frame referencing a handle that's not in use", - }, fmt.Errorf("received detach frame with unknown link handle %d", body.Handle)) - continue - } - s.muxFrameToLink(link, fr) - - // we received a detach frame and sent it to the link. - // this was either the response to a client-side initiated - // detach or our peer detached us. either way, now that - // the link has processed the frame it's detached so we - // are safe to clean up its state. 
- delete(links, link.remoteHandle) - delete(deliveryIDByHandle, link.handle) - s.deallocateHandle(link) - - case *frames.PerformEnd: - // there are two possibilities: - // - this is the ack to a client-side Close() - // - the peer is ending the session so we must ack - - if closeInProgress { - return - } - - // peer detached us with an error, save it and send the ack - if body.Error != nil { - s.doneErr = body.Error - } - - fr := frames.PerformEnd{} - s.txFrame(context.Background(), &fr, nil) - - // per spec, when end is received, we're no longer allowed to receive frames - return - - default: - debug.Log(1, "RX (Session %p): unexpected frame: %s\n", s, body) - closeWithError(&Error{ - Condition: ErrCondInternalError, - Description: "session received unexpected frame", - }, fmt.Errorf("internal error: unexpected frame %T", body)) - } - - case env := <-txTransfer: - fr := &env.Frame - // record current delivery ID - var deliveryID uint32 - if fr.DeliveryID == &needsDeliveryID { - deliveryID = nextDeliveryID - fr.DeliveryID = &deliveryID - nextDeliveryID++ - deliveryIDByHandle[fr.Handle] = deliveryID - - // add to handleByDeliveryID if not sender-settled - if !fr.Settled { - handlesByDeliveryID[deliveryID] = fr.Handle - } - } else { - // if fr.DeliveryID is nil it must have been added - // to deliveryIDByHandle already - deliveryID = deliveryIDByHandle[fr.Handle] - } - - // log after the delivery ID has been assigned - debug.Log(2, "TX (Session %p): %d, %s", s, s.channel, fr) - - // frame has been sender-settled, remove from map - if fr.Settled { - delete(handlesByDeliveryID, deliveryID) - } - - s.txFrame(env.Ctx, fr, env.Sent) - if sendErr := <-env.Sent; sendErr != nil { - s.doneErr = sendErr - - // put the error back as our sender will read from this channel - env.Sent <- sendErr - return - } - - // if not settled, add done chan to map - if !fr.Settled && fr.Done != nil { - settlementByDeliveryID[deliveryID] = fr.Done - } else if fr.Done != nil { - // 
sender-settled, close done now that the transfer has been sent - close(fr.Done) - } - - // "Upon sending a transfer, the sending endpoint will increment - // its next-outgoing-id, decrement its remote-incoming-window, - // and MAY (depending on policy) decrement its outgoing-window." - nextOutgoingID++ - // don't decrement if we're at 0 or we could loop to int max - if remoteIncomingWindow != 0 { - remoteIncomingWindow-- - } - - case env := <-tx: - fr := env.FrameBody - debug.Log(2, "TX (Session %p): %d, %s", s, s.channel, fr) - switch fr := env.FrameBody.(type) { - case *frames.PerformDisposition: - if fr.Settled && fr.Role == encoding.RoleSender { - // sender with a peer that's in mode second; sending confirmation of disposition. - // disposition frames can reference a range of delivery IDs, although it's highly - // likely in this case there will only be one. - start := fr.First - end := start - if fr.Last != nil { - end = *fr.Last - } - for deliveryID := start; deliveryID <= end; deliveryID++ { - // send delivery state to the channel and close it to signal - // that the delivery has completed. 
- if done, ok := settlementByDeliveryID[deliveryID]; ok { - delete(settlementByDeliveryID, deliveryID) - select { - case done <- fr.State: - default: - } - close(done) - } - } - } - s.txFrame(env.Ctx, fr, env.Sent) - case *frames.PerformFlow: - niID := nextIncomingID - fr.NextIncomingID = &niID - fr.IncomingWindow = s.incomingWindow - fr.NextOutgoingID = nextOutgoingID - fr.OutgoingWindow = s.outgoingWindow - s.txFrame(context.Background(), fr, env.Sent) - case *frames.PerformTransfer: - panic("transfer frames must use txTransfer") - default: - s.txFrame(context.Background(), fr, env.Sent) - } - } - } -} - -func (s *Session) allocateHandle(ctx context.Context, l *link) error { - s.linksMu.Lock() - defer s.linksMu.Unlock() - - // Check if link name already exists, if so then an error should be returned - existing := s.linksByKey[l.key] - if existing != nil { - return fmt.Errorf("link with name '%v' already exists", l.key.name) - } - - next, ok := s.handles.Next() - if !ok { - if err := s.Close(ctx); err != nil { - return err - } - // handle numbers are zero-based, report the actual count - return &SessionError{inner: fmt.Errorf("reached session handle max (%d)", s.handleMax+1)} - } - - l.handle = next // allocate handle to the link - s.linksByKey[l.key] = l // add to mapping - - return nil -} - -func (s *Session) deallocateHandle(l *link) { - s.linksMu.Lock() - defer s.linksMu.Unlock() - - delete(s.linksByKey, l.key) - s.handles.Remove(l.handle) -} - -func (s *Session) abandonLink(l *link) { - s.abandonedLinksMu.Lock() - defer s.abandonedLinksMu.Unlock() - s.abandonedLinks = append(s.abandonedLinks, l) -} - -func (s *Session) freeAbandonedLinks(ctx context.Context) error { - s.abandonedLinksMu.Lock() - defer s.abandonedLinksMu.Unlock() - - debug.Log(3, "TX (Session %p): cleaning up %d abandoned links", s, len(s.abandonedLinks)) - - for _, l := range s.abandonedLinks { - dr := &frames.PerformDetach{ - Handle: l.handle, - Closed: true, - } - if err := 
s.txFrameAndWait(ctx, dr); err != nil { - return err - } - } - - s.abandonedLinks = nil - return nil -} - -func (s *Session) muxFrameToLink(l *link, fr frames.FrameBody) { - q := l.rxQ.Acquire() - q.Enqueue(fr) - l.rxQ.Release(q) - debug.Log(2, "RX (Session %p): mux frame to link (%p): %s, %s", s, l, l.key.name, fr) -} - -// transferEnvelope is used by senders to send transfer frames -type transferEnvelope struct { - Ctx context.Context - Frame frames.PerformTransfer - - // Sent is *never* nil as we use this for confirmation of sending - // NOTE: use a buffered channel of size 1 when populating - Sent chan error -} - -// frameBodyEnvelope is used by senders and receivers to send frames. -type frameBodyEnvelope struct { - Ctx context.Context - FrameBody frames.FrameBody - - // Sent *can* be nil depending on what frame is being sent. - // e.g. sending a disposition frame frame a receiver's settlement - // APIs will have a non-nil channel vs sending a flow frame - // NOTE: use a buffered channel of size 1 when populating - Sent chan error -} - -// the address of this var is a sentinel value indicating -// that a transfer frame is in need of a delivery ID -var needsDeliveryID uint32 diff --git a/sdk/messaging/azservicebus/internal/mgmt.go b/sdk/messaging/azservicebus/internal/mgmt.go index 74f42d117b0e..cf57ba06e004 100644 --- a/sdk/messaging/azservicebus/internal/mgmt.go +++ b/sdk/messaging/azservicebus/internal/mgmt.go @@ -12,7 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) type Disposition struct { diff --git a/sdk/messaging/azservicebus/internal/mock/emulation/events.go b/sdk/messaging/azservicebus/internal/mock/emulation/events.go 
index e2e19e32e628..d34791841e05 100644 --- a/sdk/messaging/azservicebus/internal/mock/emulation/events.go +++ b/sdk/messaging/azservicebus/internal/mock/emulation/events.go @@ -7,7 +7,7 @@ import ( "fmt" "sync" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) type EventType string diff --git a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data.go b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data.go index cb2fd0ded2d7..6e49410a9191 100644 --- a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data.go +++ b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data.go @@ -17,9 +17,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/auth" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/sbauth" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_receiver.go b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_receiver.go index 73dc7c15720c..bcef3023fcc3 100644 --- a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_receiver.go +++ b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_receiver.go @@ -10,8 +10,8 @@ import ( "sync/atomic" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock" + "github.com/Azure/go-amqp" 
"github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_sender.go b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_sender.go index aa02398a85c4..7ee49c436bc3 100644 --- a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_sender.go +++ b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_sender.go @@ -9,8 +9,8 @@ import ( "sync/atomic" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_session.go b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_session.go index 5bc7529d25da..0b3f34e8deef 100644 --- a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_session.go +++ b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_session.go @@ -7,8 +7,8 @@ import ( "context" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_test.go b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_test.go index 8d6ea6c2e2a1..7cca0c64d372 100644 --- a/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_test.go +++ b/sdk/messaging/azservicebus/internal/mock/emulation/mock_data_test.go @@ -8,8 +8,8 @@ import ( "testing" "time" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" 
"github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock/emulation" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/internal/mock/emulation/queue.go b/sdk/messaging/azservicebus/internal/mock/emulation/queue.go index faf80a446e82..f336d67c6cc0 100644 --- a/sdk/messaging/azservicebus/internal/mock/emulation/queue.go +++ b/sdk/messaging/azservicebus/internal/mock/emulation/queue.go @@ -9,8 +9,8 @@ import ( "sync" azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" + "github.com/Azure/go-amqp" ) type Operation struct { diff --git a/sdk/messaging/azservicebus/internal/mock/emulation/queue_test.go b/sdk/messaging/azservicebus/internal/mock/emulation/queue_test.go index 0a9b822ef539..636d71e93010 100644 --- a/sdk/messaging/azservicebus/internal/mock/emulation/queue_test.go +++ b/sdk/messaging/azservicebus/internal/mock/emulation/queue_test.go @@ -8,8 +8,8 @@ import ( "testing" "time" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock/emulation" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/internal/mock/mock_amqp.go b/sdk/messaging/azservicebus/internal/mock/mock_amqp.go index 5ee2c0e5ea6a..9bd0b22f579d 100644 --- a/sdk/messaging/azservicebus/internal/mock/mock_amqp.go +++ b/sdk/messaging/azservicebus/internal/mock/mock_amqp.go @@ -13,7 +13,7 @@ import ( reflect "reflect" amqpwrap "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - amqp "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + amqp 
"github.com/Azure/go-amqp" gomock "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azservicebus/internal/mock/mock_helpers.go b/sdk/messaging/azservicebus/internal/mock/mock_helpers.go index 1695aaf5c3d6..fa4480069435 100644 --- a/sdk/messaging/azservicebus/internal/mock/mock_helpers.go +++ b/sdk/messaging/azservicebus/internal/mock/mock_helpers.go @@ -8,7 +8,7 @@ import ( "fmt" "time" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" gomock "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azservicebus/internal/mock/mock_rpc.go b/sdk/messaging/azservicebus/internal/mock/mock_rpc.go index 94fb00fcf2cb..193c3653f18d 100644 --- a/sdk/messaging/azservicebus/internal/mock/mock_rpc.go +++ b/sdk/messaging/azservicebus/internal/mock/mock_rpc.go @@ -13,7 +13,7 @@ import ( reflect "reflect" amqpwrap "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - amqp "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + amqp "github.com/Azure/go-amqp" gomock "github.com/golang/mock/gomock" ) diff --git a/sdk/messaging/azservicebus/internal/namespace.go b/sdk/messaging/azservicebus/internal/namespace.go index 8d40a7ab6f36..c56a1e083067 100644 --- a/sdk/messaging/azservicebus/internal/namespace.go +++ b/sdk/messaging/azservicebus/internal/namespace.go @@ -20,9 +20,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/auth" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/conn" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/sbauth" 
"github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/utils" + "github.com/Azure/go-amqp" ) var rootUserAgent = telemetry.Format("azservicebus", Version) diff --git a/sdk/messaging/azservicebus/internal/namespace_test.go b/sdk/messaging/azservicebus/internal/namespace_test.go index 910eb1eeedd2..82d7ef9aed27 100644 --- a/sdk/messaging/azservicebus/internal/namespace_test.go +++ b/sdk/messaging/azservicebus/internal/namespace_test.go @@ -15,9 +15,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/telemetry" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/auth" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/sbauth" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/internal/rpc.go b/sdk/messaging/azservicebus/internal/rpc.go index 2dbb180b54a1..a23fede1964a 100644 --- a/sdk/messaging/azservicebus/internal/rpc.go +++ b/sdk/messaging/azservicebus/internal/rpc.go @@ -14,7 +14,7 @@ import ( azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) const ( diff --git a/sdk/messaging/azservicebus/internal/rpc_test.go b/sdk/messaging/azservicebus/internal/rpc_test.go index 6ca286c85d9a..704e4137b1f7 100644 --- a/sdk/messaging/azservicebus/internal/rpc_test.go +++ b/sdk/messaging/azservicebus/internal/rpc_test.go @@ -10,9 +10,9 @@ import ( 
"testing" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/internal/test/test_helpers.go b/sdk/messaging/azservicebus/internal/test/test_helpers.go index 49636ec457ed..655c94bc4d64 100644 --- a/sdk/messaging/azservicebus/internal/test/test_helpers.go +++ b/sdk/messaging/azservicebus/internal/test/test_helpers.go @@ -18,7 +18,7 @@ import ( azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/atom" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/internal/utils/retrier_test.go b/sdk/messaging/azservicebus/internal/utils/retrier_test.go index 4c7282a5e897..cc0551c61033 100644 --- a/sdk/messaging/azservicebus/internal/utils/retrier_test.go +++ b/sdk/messaging/azservicebus/internal/utils/retrier_test.go @@ -14,8 +14,8 @@ import ( azlog "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/message.go 
b/sdk/messaging/azservicebus/message.go index b3247fd5d355..40b690b0f548 100644 --- a/sdk/messaging/azservicebus/message.go +++ b/sdk/messaging/azservicebus/message.go @@ -9,7 +9,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) // ReceivedMessage is a received message from a Client.NewReceiver(). diff --git a/sdk/messaging/azservicebus/messageSettler.go b/sdk/messaging/azservicebus/messageSettler.go index 0b787efadb67..915cc020d905 100644 --- a/sdk/messaging/azservicebus/messageSettler.go +++ b/sdk/messaging/azservicebus/messageSettler.go @@ -8,8 +8,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/utils" + "github.com/Azure/go-amqp" ) type settler interface { diff --git a/sdk/messaging/azservicebus/message_batch.go b/sdk/messaging/azservicebus/message_batch.go index 1fde3d4bd761..8b1e10a8c5d8 100644 --- a/sdk/messaging/azservicebus/message_batch.go +++ b/sdk/messaging/azservicebus/message_batch.go @@ -8,7 +8,7 @@ import ( "sync" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) // ErrMessageTooLarge is returned when a message cannot fit into a batch when using MessageBatch.Add() diff --git a/sdk/messaging/azservicebus/message_batch_test.go b/sdk/messaging/azservicebus/message_batch_test.go index fee360a7bbd8..5ab3e9dbf35b 100644 --- a/sdk/messaging/azservicebus/message_batch_test.go +++ 
b/sdk/messaging/azservicebus/message_batch_test.go @@ -7,7 +7,7 @@ import ( "sync" "testing" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/message_test.go b/sdk/messaging/azservicebus/message_test.go index 2db45826d68c..3704b7591f35 100644 --- a/sdk/messaging/azservicebus/message_test.go +++ b/sdk/messaging/azservicebus/message_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/receiver.go b/sdk/messaging/azservicebus/receiver.go index 240fd4b2924b..3646a9c1c8e0 100644 --- a/sdk/messaging/azservicebus/receiver.go +++ b/sdk/messaging/azservicebus/receiver.go @@ -14,8 +14,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/utils" + "github.com/Azure/go-amqp" ) // ReceiveMode represents the lock style to use for a receiver - either diff --git a/sdk/messaging/azservicebus/receiver_helpers_test.go b/sdk/messaging/azservicebus/receiver_helpers_test.go index 1a56ae304e9e..bb2fb6698ea4 100644 --- a/sdk/messaging/azservicebus/receiver_helpers_test.go +++ b/sdk/messaging/azservicebus/receiver_helpers_test.go @@ -7,7 +7,7 @@ import ( "context" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - 
"github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" + "github.com/Azure/go-amqp" ) type StubAMQPReceiver struct { diff --git a/sdk/messaging/azservicebus/receiver_simulated_test.go b/sdk/messaging/azservicebus/receiver_simulated_test.go index 18f7210aacda..b2444e3cb22a 100644 --- a/sdk/messaging/azservicebus/receiver_simulated_test.go +++ b/sdk/messaging/azservicebus/receiver_simulated_test.go @@ -14,10 +14,10 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock/emulation" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/receiver_test.go b/sdk/messaging/azservicebus/receiver_test.go index 0d691447c9cc..d1617f77526a 100644 --- a/sdk/messaging/azservicebus/receiver_test.go +++ b/sdk/messaging/azservicebus/receiver_test.go @@ -15,9 +15,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/admin" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/sas" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" + "github.com/Azure/go-amqp" 
"github.com/stretchr/testify/require" ) @@ -610,7 +610,7 @@ func TestReceiverAMQPDataTypes(t *testing.T) { // - TypeCodeDecimal64 // - TypeCodeDecimal128 // - TypeCodeChar (although note below that a 'character' does work, although it's not a TypecodeChar value) - // https://github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp/blob/e0c6c63fb01e6642686ee4f8e7412da042bf35dd/internal/encoding/decode.go#L568 + // https://github.com/Azure/go-amqp/blob/e0c6c63fb01e6642686ee4f8e7412da042bf35dd/internal/encoding/decode.go#L568 "timestamp": expectedTime, "byte": byte(128), diff --git a/sdk/messaging/azservicebus/receiver_unit_test.go b/sdk/messaging/azservicebus/receiver_unit_test.go index 50bf141d70f0..0b131f4ee9ca 100644 --- a/sdk/messaging/azservicebus/receiver_unit_test.go +++ b/sdk/messaging/azservicebus/receiver_unit_test.go @@ -13,8 +13,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/exported" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/sender.go b/sdk/messaging/azservicebus/sender.go index 24e18ab23f58..7e4c31d90773 100644 --- a/sdk/messaging/azservicebus/sender.go +++ b/sdk/messaging/azservicebus/sender.go @@ -10,8 +10,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" 
"github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/utils" + "github.com/Azure/go-amqp" ) type ( diff --git a/sdk/messaging/azservicebus/sender_unit_test.go b/sdk/messaging/azservicebus/sender_unit_test.go index 5ab1781b9eba..b676ffc75150 100644 --- a/sdk/messaging/azservicebus/sender_unit_test.go +++ b/sdk/messaging/azservicebus/sender_unit_test.go @@ -8,8 +8,8 @@ import ( "testing" "time" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/mock/emulation" + "github.com/Azure/go-amqp" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) diff --git a/sdk/messaging/azservicebus/session_receiver.go b/sdk/messaging/azservicebus/session_receiver.go index 07ee1ba90276..6b9a0f9af450 100644 --- a/sdk/messaging/azservicebus/session_receiver.go +++ b/sdk/messaging/azservicebus/session_receiver.go @@ -10,8 +10,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/amqpwrap" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/utils" + "github.com/Azure/go-amqp" ) // SessionReceiver is a Receiver that handles sessions. 
diff --git a/sdk/messaging/azservicebus/session_receiver_test.go b/sdk/messaging/azservicebus/session_receiver_test.go index c58e4b6b3797..5a0c89ff877f 100644 --- a/sdk/messaging/azservicebus/session_receiver_test.go +++ b/sdk/messaging/azservicebus/session_receiver_test.go @@ -14,8 +14,8 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/admin" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal" - "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/go-amqp" "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus/internal/test" + "github.com/Azure/go-amqp" "github.com/stretchr/testify/require" ) From 8db51cacc1074bd0f9da0a1250e38bca605a9402 Mon Sep 17 00:00:00 2001 From: Joel Hendrix Date: Tue, 9 May 2023 07:34:38 -0700 Subject: [PATCH 34/50] Add supporting features to enable distributed tracing (#20301) (#20708) * Add supporting features to enable distributed tracing This includes new internal pipeline policies and other supporting types. See the changelog for a full description. Added some missing doc comments. * fix linter issue * add net.peer.name trace attribute sequence custom HTTP header policy before logging policy. sequence logging policy after HTTP trace policy. keep body download policy at the end. 
* add span for iterating over pages --- sdk/azcore/CHANGELOG.md | 8 + sdk/azcore/arm/runtime/pipeline.go | 3 +- .../arm/runtime/policy_trace_namespace.go | 31 ++++ .../runtime/policy_trace_namespace_test.go | 97 +++++++++++ sdk/azcore/core.go | 3 + sdk/azcore/core_test.go | 41 +++++ sdk/azcore/internal/exported/request.go | 8 + sdk/azcore/internal/exported/request_test.go | 17 ++ sdk/azcore/internal/shared/constants.go | 1 + sdk/azcore/internal/shared/shared.go | 3 + sdk/azcore/policy/policy.go | 3 +- sdk/azcore/runtime/pager.go | 13 ++ sdk/azcore/runtime/pipeline.go | 30 +++- sdk/azcore/runtime/policy_http_trace.go | 117 +++++++++++++ sdk/azcore/runtime/policy_http_trace_test.go | 163 ++++++++++++++++++ sdk/azcore/runtime/poller.go | 94 +++++++--- sdk/azcore/runtime/poller_test.go | 14 ++ sdk/azcore/tracing/tracing.go | 41 ++++- sdk/azcore/tracing/tracing_test.go | 22 ++- 19 files changed, 677 insertions(+), 32 deletions(-) create mode 100644 sdk/azcore/arm/runtime/policy_trace_namespace.go create mode 100644 sdk/azcore/arm/runtime/policy_trace_namespace_test.go create mode 100644 sdk/azcore/runtime/policy_http_trace.go create mode 100644 sdk/azcore/runtime/policy_http_trace_test.go diff --git a/sdk/azcore/CHANGELOG.md b/sdk/azcore/CHANGELOG.md index df18a5205d4e..de9fd6de967c 100644 --- a/sdk/azcore/CHANGELOG.md +++ b/sdk/azcore/CHANGELOG.md @@ -3,6 +3,14 @@ ## 1.6.1 (Unreleased) ### Features Added +* Added supporting features to enable distributed tracing. + * Added func `runtime.StartSpan()` for use by SDKs to start spans. + * Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context. + * Added field `TracingNamespace` to `runtime.PipelineOptions`. + * Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types. + * Added field `SpanFromContext` to `tracing.TracerOptions`. + * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`. 
+ * Added supporting pipeline policies to include HTTP spans when creating clients. ### Breaking Changes diff --git a/sdk/azcore/arm/runtime/pipeline.go b/sdk/azcore/arm/runtime/pipeline.go index 266c74b17bf1..302c19cd4265 100644 --- a/sdk/azcore/arm/runtime/pipeline.go +++ b/sdk/azcore/arm/runtime/pipeline.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) @@ -34,7 +35,7 @@ func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azr }) perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) copy(perRetry, plOpts.PerRetry) - plOpts.PerRetry = append(perRetry, authPolicy) + plOpts.PerRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) if !options.DisableRPRegistration { regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions} regPolicy, err := NewRPRegistrationPolicy(cred, ®RPOpts) diff --git a/sdk/azcore/arm/runtime/policy_trace_namespace.go b/sdk/azcore/arm/runtime/policy_trace_namespace.go new file mode 100644 index 000000000000..76aefe8550dd --- /dev/null +++ b/sdk/azcore/arm/runtime/policy_trace_namespace.go @@ -0,0 +1,31 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// httpTraceNamespacePolicy is a policy that adds the az.namespace attribute to the current Span +func httpTraceNamespacePolicy(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok { + rt, err := resource.ParseResourceType(req.Raw().URL.Path) + if err == nil { + // add the namespace attribute to the current span + if span, ok := tracer.SpanFromContext(req.Raw().Context()); ok { + span.SetAttributes(tracing.Attribute{Key: "az.namespace", Value: rt.Namespace}) + } + } + } + return req.Next() +} diff --git a/sdk/azcore/arm/runtime/policy_trace_namespace_test.go b/sdk/azcore/arm/runtime/policy_trace_namespace_test.go new file mode 100644 index 000000000000..4ac7484823f8 --- /dev/null +++ b/sdk/azcore/arm/runtime/policy_trace_namespace_test.go @@ -0,0 +1,97 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "net/http" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" + "github.com/Azure/azure-sdk-for-go/sdk/internal/mock" + "github.com/stretchr/testify/require" +) + +func TestHTTPTraceNamespacePolicy(t *testing.T) { + srv, close := mock.NewServer() + defer close() + + pl := exported.NewPipeline(srv, exported.PolicyFunc(httpTraceNamespacePolicy)) + + // no tracer + req, err := exported.NewRequest(context.Background(), http.MethodGet, srv.URL()) + require.NoError(t, err) + srv.AppendResponse() + _, err = pl.Do(req) + require.NoError(t, err) + + // wrong tracer type + req, err = exported.NewRequest(context.WithValue(context.Background(), shared.CtxWithTracingTracer{}, 0), http.MethodGet, srv.URL()) + require.NoError(t, err) + srv.AppendResponse() + _, err = pl.Do(req) + require.NoError(t, err) + + // no SpanFromContext impl + tr := tracing.NewTracer(func(ctx context.Context, spanName string, options *tracing.SpanOptions) (context.Context, tracing.Span) { + return ctx, tracing.Span{} + }, nil) + req, err = exported.NewRequest(context.WithValue(context.Background(), shared.CtxWithTracingTracer{}, tr), http.MethodGet, srv.URL()) + require.NoError(t, err) + srv.AppendResponse() + _, err = pl.Do(req) + require.NoError(t, err) + + // failed to parse resource ID, shouldn't call SetAttributes + var attrString string + tr = tracing.NewTracer(func(ctx context.Context, spanName string, options *tracing.SpanOptions) (context.Context, tracing.Span) { + return ctx, tracing.Span{} + }, &tracing.TracerOptions{ + SpanFromContext: func(ctx context.Context) (tracing.Span, bool) { + spanImpl := tracing.SpanImpl{ + SetAttributes: func(a ...tracing.Attribute) { + require.Len(t, a, 1) + v, ok := a[0].Value.(string) + require.True(t, ok) + attrString = a[0].Key 
+ ":" + v + }, + } + return tracing.NewSpan(spanImpl), true + }, + }) + req, err = exported.NewRequest(context.WithValue(context.Background(), shared.CtxWithTracingTracer{}, tr), http.MethodGet, srv.URL()) + require.NoError(t, err) + srv.AppendResponse() + _, err = pl.Do(req) + require.NoError(t, err) + require.Empty(t, attrString) + + // success + tr = tracing.NewTracer(func(ctx context.Context, spanName string, options *tracing.SpanOptions) (context.Context, tracing.Span) { + return ctx, tracing.Span{} + }, &tracing.TracerOptions{ + SpanFromContext: func(ctx context.Context) (tracing.Span, bool) { + spanImpl := tracing.SpanImpl{ + SetAttributes: func(a ...tracing.Attribute) { + require.Len(t, a, 1) + v, ok := a[0].Value.(string) + require.True(t, ok) + attrString = a[0].Key + ":" + v + }, + } + return tracing.NewSpan(spanImpl), true + }, + }) + req, err = exported.NewRequest(context.WithValue(context.Background(), shared.CtxWithTracingTracer{}, tr), http.MethodGet, srv.URL()+requestEndpoint) + require.NoError(t, err) + srv.AppendResponse() + _, err = pl.Do(req) + require.NoError(t, err) + require.EqualValues(t, "az.namespace:Microsoft.Storage", attrString) +} diff --git a/sdk/azcore/core.go b/sdk/azcore/core.go index 72c2cf21eef3..29666d2d021f 100644 --- a/sdk/azcore/core.go +++ b/sdk/azcore/core.go @@ -99,6 +99,9 @@ func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, pl := runtime.NewPipeline(pkg, moduleVersion, plOpts, options) tr := options.TracingProvider.NewTracer(clientName, moduleVersion) + if tr.Enabled() && plOpts.TracingNamespace != "" { + tr.SetAttributes(tracing.Attribute{Key: "az.namespace", Value: plOpts.TracingNamespace}) + } return &Client{pl: pl, tr: tr}, nil } diff --git a/sdk/azcore/core_test.go b/sdk/azcore/core_test.go index 13d3361e1f77..78c4af58bc55 100644 --- a/sdk/azcore/core_test.go +++ b/sdk/azcore/core_test.go @@ -7,11 +7,17 @@ package azcore import ( + "context" + "net/http" "reflect" "testing" + 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" + "github.com/Azure/azure-sdk-for-go/sdk/internal/mock" "github.com/stretchr/testify/require" ) @@ -131,3 +137,38 @@ func TestNewClientError(t *testing.T) { require.Error(t, err) require.Nil(t, client) } + +func TestNewClientTracingEnabled(t *testing.T) { + srv, close := mock.NewServer() + defer close() + + var attrString string + client, err := NewClient("package.Client", "v1.0.0", runtime.PipelineOptions{TracingNamespace: "Widget.Factory"}, &policy.ClientOptions{ + TracingProvider: tracing.NewProvider(func(name, version string) tracing.Tracer { + return tracing.NewTracer(func(ctx context.Context, spanName string, options *tracing.SpanOptions) (context.Context, tracing.Span) { + require.NotNil(t, options) + for _, attr := range options.Attributes { + if attr.Key == "az.namespace" { + v, ok := attr.Value.(string) + require.True(t, ok) + attrString = attr.Key + ":" + v + } + } + return ctx, tracing.Span{} + }, nil) + }, nil), + Transport: srv, + }) + require.NoError(t, err) + require.NotNil(t, client) + require.NotZero(t, client.Pipeline()) + require.NotZero(t, client.Tracer()) + + const requestEndpoint = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/fakeResourceGroupo/providers/Microsoft.Storage/storageAccounts/fakeAccountName" + req, err := exported.NewRequest(context.WithValue(context.Background(), shared.CtxWithTracingTracer{}, client.Tracer()), http.MethodGet, srv.URL()+requestEndpoint) + require.NoError(t, err) + srv.AppendResponse() + _, err = client.Pipeline().Do(req) + require.NoError(t, err) + require.EqualValues(t, "az.namespace:Widget.Factory", attrString) +} diff --git 
a/sdk/azcore/internal/exported/request.go b/sdk/azcore/internal/exported/request.go index fa99d1b7ed1f..48229f5ccd68 100644 --- a/sdk/azcore/internal/exported/request.go +++ b/sdk/azcore/internal/exported/request.go @@ -170,6 +170,14 @@ func (req *Request) Clone(ctx context.Context) *Request { return &r2 } +// WithContext returns a shallow copy of the request with its context changed to ctx. +func (req *Request) WithContext(ctx context.Context) *Request { + r2 := new(Request) + *r2 = *req + r2.req = r2.req.WithContext(ctx) + return r2 +} + // not exported but dependent on Request // PolicyFunc is a type that implements the Policy interface. diff --git a/sdk/azcore/internal/exported/request_test.go b/sdk/azcore/internal/exported/request_test.go index d26b734c82c9..3acc8e7a76ae 100644 --- a/sdk/azcore/internal/exported/request_test.go +++ b/sdk/azcore/internal/exported/request_test.go @@ -194,3 +194,20 @@ func TestNewRequestFail(t *testing.T) { t.Fatal("unexpected request") } } + +func TestRequestWithContext(t *testing.T) { + type ctxKey1 struct{} + type ctxKey2 struct{} + + req1, err := NewRequest(context.WithValue(context.Background(), ctxKey1{}, 1), http.MethodPost, testURL) + require.NoError(t, err) + require.NotNil(t, req1.Raw().Context().Value(ctxKey1{})) + + req2 := req1.WithContext(context.WithValue(context.Background(), ctxKey2{}, 1)) + require.Nil(t, req2.Raw().Context().Value(ctxKey1{})) + require.NotNil(t, req2.Raw().Context().Value(ctxKey2{})) + + // shallow copy, so changing req2 affects req1 + req2.Raw().Header.Add("added-req2", "value") + require.EqualValues(t, "value", req1.Raw().Header.Get("added-req2")) +} diff --git a/sdk/azcore/internal/shared/constants.go b/sdk/azcore/internal/shared/constants.go index 269a831ed178..dcd3d098b339 100644 --- a/sdk/azcore/internal/shared/constants.go +++ b/sdk/azcore/internal/shared/constants.go @@ -23,6 +23,7 @@ const ( HeaderUserAgent = "User-Agent" HeaderWWWAuthenticate = "WWW-Authenticate" 
HeaderXMSClientRequestID = "x-ms-client-request-id" + HeaderXMSRequestID = "x-ms-request-id" ) const BearerTokenPrefix = "Bearer " diff --git a/sdk/azcore/internal/shared/shared.go b/sdk/azcore/internal/shared/shared.go index 930ab8c83999..9bd054b3643e 100644 --- a/sdk/azcore/internal/shared/shared.go +++ b/sdk/azcore/internal/shared/shared.go @@ -26,6 +26,9 @@ type CtxWithRetryOptionsKey struct{} // CtxIncludeResponseKey is used as a context key for retrieving the raw response. type CtxIncludeResponseKey struct{} +// CtxWithTracingTracer is used as a context key for adding/retrieving tracing.Tracer. +type CtxWithTracingTracer struct{} + // Delay waits for the duration to elapse or the context to be cancelled. func Delay(ctx context.Context, delay time.Duration) error { select { diff --git a/sdk/azcore/policy/policy.go b/sdk/azcore/policy/policy.go index b200047834ce..2d7ad45f6167 100644 --- a/sdk/azcore/policy/policy.go +++ b/sdk/azcore/policy/policy.go @@ -29,7 +29,8 @@ type Request = exported.Request // ClientOptions contains optional settings for a client's pipeline. // All zero-value fields will be initialized with default values. type ClientOptions struct { - // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. APIVersion string // Cloud specifies a cloud for the client. The default is Azure Public Cloud. 
diff --git a/sdk/azcore/runtime/pager.go b/sdk/azcore/runtime/pager.go index 5507665d651d..b7e59527a3d8 100644 --- a/sdk/azcore/runtime/pager.go +++ b/sdk/azcore/runtime/pager.go @@ -10,6 +10,10 @@ import ( "context" "encoding/json" "errors" + "fmt" + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) // PagingHandler contains the required data for constructing a Pager. @@ -20,12 +24,16 @@ type PagingHandler[T any] struct { // Fetcher fetches the first and subsequent pages. Fetcher func(context.Context, *T) (T, error) + + // Tracer contains the Tracer from the client that's creating the Pager. + Tracer tracing.Tracer } // Pager provides operations for iterating over paged responses. type Pager[T any] struct { current *T handler PagingHandler[T] + tracer tracing.Tracer firstPage bool } @@ -34,6 +42,7 @@ type Pager[T any] struct { func NewPager[T any](handler PagingHandler[T]) *Pager[T] { return &Pager[T]{ handler: handler, + tracer: handler.Tracer, firstPage: true, } } @@ -58,10 +67,14 @@ func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { } else if !p.handler.More(*p.current) { return *new(T), errors.New("no more pages") } + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.NextPage", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer endSpan(err) resp, err = p.handler.Fetcher(ctx, p.current) } else { // non-LRO case, first page p.firstPage = false + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.NextPage", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer endSpan(err) resp, err = p.handler.Fetcher(ctx, nil) } if err != nil { diff --git a/sdk/azcore/runtime/pipeline.go b/sdk/azcore/runtime/pipeline.go index 9d9288f53d3d..ffb3ea1fcf65 100644 --- a/sdk/azcore/runtime/pipeline.go +++ b/sdk/azcore/runtime/pipeline.go @@ -13,9 +13,29 @@ import ( // PipelineOptions contains Pipeline options for SDK developers type PipelineOptions struct { - AllowedHeaders, AllowedQueryParameters []string - APIVersion 
APIVersionOptions - PerCall, PerRetry []policy.Policy + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParameters is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParameters []string + + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion APIVersionOptions + + // PerCall contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCall []policy.Policy + + // PerRetry contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetry []policy.Policy + + // TracingNamespace contains the value to use for the az.namespace span attribute. + TracingNamespace string } // Pipeline represents a primitive for sending HTTP requests and receiving responses. @@ -56,8 +76,10 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy policies = append(policies, NewRetryPolicy(&cp.Retry)) policies = append(policies, plOpts.PerRetry...) policies = append(policies, cp.PerRetryPolicies...) 
+ policies = append(policies, exported.PolicyFunc(httpHeaderPolicy)) + policies = append(policies, newHTTPTracePolicy(cp.Logging.AllowedQueryParams)) policies = append(policies, NewLogPolicy(&cp.Logging)) - policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy)) + policies = append(policies, exported.PolicyFunc(bodyDownloadPolicy)) transport := cp.Transport if transport == nil { transport = defaultHTTPClient diff --git a/sdk/azcore/runtime/policy_http_trace.go b/sdk/azcore/runtime/policy_http_trace.go new file mode 100644 index 000000000000..55eb7fea07cc --- /dev/null +++ b/sdk/azcore/runtime/policy_http_trace.go @@ -0,0 +1,117 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +const ( + attrHTTPMethod = "http.method" + attrHTTPURL = "http.url" + attrHTTPUserAgent = "http.user_agent" + attrHTTPStatusCode = "http.status_code" + + attrAZClientReqID = "az.client_request_id" + attrAZServiceReqID = "az.service_request_id" + + attrNetPeerName = "net.peer.name" +) + +// newHTTPTracePolicy creates a new instance of the httpTracePolicy. 
+// - allowedQueryParams contains the user-specified query parameters that don't need to be redacted from the trace +func newHTTPTracePolicy(allowedQueryParams []string) exported.Policy { + return &httpTracePolicy{allowedQP: getAllowedQueryParams(allowedQueryParams)} +} + +// httpTracePolicy is a policy that creates a trace for the HTTP request and its response +type httpTracePolicy struct { + allowedQP map[string]struct{} +} + +// Do implements the pipeline.Policy interfaces for the httpTracePolicy type. +func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok { + attributes := []tracing.Attribute{ + {Key: attrHTTPMethod, Value: req.Raw().Method}, + {Key: attrHTTPURL, Value: getSanitizedURL(*req.Raw().URL, h.allowedQP)}, + {Key: attrNetPeerName, Value: req.Raw().URL.Host}, + } + + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + attributes = append(attributes, tracing.Attribute{Key: attrHTTPUserAgent, Value: ua}) + } + if reqID := req.Raw().Header.Get(shared.HeaderXMSClientRequestID); reqID != "" { + attributes = append(attributes, tracing.Attribute{Key: attrAZClientReqID, Value: reqID}) + } + + ctx := req.Raw().Context() + ctx, span := tracer.Start(ctx, "HTTP "+req.Raw().Method, &tracing.SpanOptions{ + Kind: tracing.SpanKindClient, + Attributes: attributes, + }) + + defer func() { + if resp != nil { + span.SetAttributes(tracing.Attribute{Key: attrHTTPStatusCode, Value: resp.StatusCode}) + if resp.StatusCode > 399 { + span.SetStatus(tracing.SpanStatusError, resp.Status) + } + if reqID := resp.Header.Get(shared.HeaderXMSRequestID); reqID != "" { + span.SetAttributes(tracing.Attribute{Key: attrAZServiceReqID, Value: reqID}) + } + } else if err != nil { + // including the output from err.Error() might disclose URL query parameters. 
+ // so instead of attempting to sanitize the output, we simply output the error type. + span.SetStatus(tracing.SpanStatusError, fmt.Sprintf("%T", err)) + } + span.End() + }() + + req = req.WithContext(ctx) + } + resp, err = req.Next() + return +} + +// StartSpanOptions contains the optional values for StartSpan. +type StartSpanOptions struct { + // for future expansion +} + +// StartSpan starts a new tracing span. +// You must call the returned func to terminate the span. Pass the applicable error +// if the span will exit with an error condition. +// - ctx is the parent context of the newly created context +// - name is the name of the span. this is typically the fully qualified name of an API ("Client.Method") +// - tracer is the client's Tracer for creating spans +// - options contains optional values. pass nil to accept any default values +func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options *StartSpanOptions) (context.Context, func(error)) { + if !tracer.Enabled() { + return ctx, func(err error) {} + } + ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{ + Kind: tracing.SpanKindInternal, + }) + ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer) + return ctx, func(err error) { + if err != nil { + errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1) + span.SetStatus(tracing.SpanStatusError, fmt.Sprintf("%s:\n%s", errType, err.Error())) + } + span.End() + } +} diff --git a/sdk/azcore/runtime/policy_http_trace_test.go b/sdk/azcore/runtime/policy_http_trace_test.go new file mode 100644 index 000000000000..16ae58604e1a --- /dev/null +++ b/sdk/azcore/runtime/policy_http_trace_test.go @@ -0,0 +1,163 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "io" + "net" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" + "github.com/Azure/azure-sdk-for-go/sdk/internal/mock" + "github.com/stretchr/testify/require" +) + +func TestHTTPTracePolicy(t *testing.T) { + srv, close := mock.NewServer() + defer close() + + pl := exported.NewPipeline(srv, newHTTPTracePolicy([]string{"visibleqp"})) + + // no tracer + req, err := exported.NewRequest(context.Background(), http.MethodGet, srv.URL()) + require.NoError(t, err) + srv.AppendResponse() + _, err = pl.Do(req) + require.NoError(t, err) + + // wrong tracer type + req, err = exported.NewRequest(context.WithValue(context.Background(), shared.CtxWithTracingTracer{}, 0), http.MethodGet, srv.URL()) + require.NoError(t, err) + srv.AppendResponse() + _, err = pl.Do(req) + require.NoError(t, err) + + var fullSpanName string + var spanKind tracing.SpanKind + var spanAttrs []tracing.Attribute + var spanStatus tracing.SpanStatus + var spanStatusStr string + tr := tracing.NewTracer(func(ctx context.Context, spanName string, options *tracing.SpanOptions) (context.Context, tracing.Span) { + fullSpanName = spanName + require.NotNil(t, options) + spanKind = options.Kind + spanAttrs = options.Attributes + spanImpl := tracing.SpanImpl{ + SetAttributes: func(a ...tracing.Attribute) { spanAttrs = append(spanAttrs, a...) 
}, + SetStatus: func(ss tracing.SpanStatus, s string) { + spanStatus = ss + spanStatusStr = s + }, + } + return ctx, tracing.NewSpan(spanImpl) + }, nil) + + // HTTP ok + req, err = exported.NewRequest(context.WithValue(context.Background(), shared.CtxWithTracingTracer{}, tr), http.MethodGet, srv.URL()+"?foo=redactme&visibleqp=bar") + require.NoError(t, err) + req.Raw().Header.Add(shared.HeaderUserAgent, "my-user-agent") + req.Raw().Header.Add(shared.HeaderXMSClientRequestID, "my-client-request") + srv.AppendResponse(mock.WithHeader(shared.HeaderXMSRequestID, "request-id")) + _, err = pl.Do(req) + require.NoError(t, err) + require.EqualValues(t, tracing.SpanStatusUnset, spanStatus) + require.EqualValues(t, "HTTP GET", fullSpanName) + require.EqualValues(t, tracing.SpanKindClient, spanKind) + require.Len(t, spanAttrs, 7) + require.Contains(t, spanAttrs, tracing.Attribute{Key: attrHTTPMethod, Value: http.MethodGet}) + require.Contains(t, spanAttrs, tracing.Attribute{Key: attrHTTPURL, Value: srv.URL() + "?foo=REDACTED&visibleqp=bar"}) + require.Contains(t, spanAttrs, tracing.Attribute{Key: attrNetPeerName, Value: srv.URL()[7:]}) // strip off the http:// + require.Contains(t, spanAttrs, tracing.Attribute{Key: attrHTTPUserAgent, Value: "my-user-agent"}) + require.Contains(t, spanAttrs, tracing.Attribute{Key: attrAZClientReqID, Value: "my-client-request"}) + require.Contains(t, spanAttrs, tracing.Attribute{Key: attrHTTPStatusCode, Value: http.StatusOK}) + require.Contains(t, spanAttrs, tracing.Attribute{Key: attrAZServiceReqID, Value: "request-id"}) + + // HTTP bad request + req, err = exported.NewRequest(context.WithValue(context.Background(), shared.CtxWithTracingTracer{}, tr), http.MethodGet, srv.URL()) + require.NoError(t, err) + srv.AppendResponse(mock.WithStatusCode(http.StatusBadRequest)) + _, err = pl.Do(req) + require.NoError(t, err) + require.EqualValues(t, tracing.SpanStatusError, spanStatus) + require.EqualValues(t, "400 Bad Request", spanStatusStr) + 
require.Contains(t, spanAttrs, tracing.Attribute{Key: attrHTTPStatusCode, Value: http.StatusBadRequest}) + + // HTTP error + req, err = exported.NewRequest(context.WithValue(context.Background(), shared.CtxWithTracingTracer{}, tr), http.MethodGet, srv.URL()) + require.NoError(t, err) + srv.AppendError(net.ErrClosed) + _, err = pl.Do(req) + require.Error(t, err) + require.ErrorIs(t, err, net.ErrClosed) + require.EqualValues(t, tracing.SpanStatusError, spanStatus) + require.EqualValues(t, "poll.errNetClosing", spanStatusStr) +} + +func TestStartSpan(t *testing.T) { + // tracing disabled + ctx, end := StartSpan(context.Background(), "TestStartSpan", tracing.Tracer{}, nil) + end(nil) + require.Same(t, context.Background(), ctx) + + // span no error + var startCalled bool + var endCalled bool + tr := tracing.NewTracer(func(ctx context.Context, spanName string, options *tracing.SpanOptions) (context.Context, tracing.Span) { + startCalled = true + require.EqualValues(t, "TestStartSpan", spanName) + require.NotNil(t, options) + require.EqualValues(t, tracing.SpanKindInternal, options.Kind) + spanImpl := tracing.SpanImpl{ + End: func() { endCalled = true }, + } + return ctx, tracing.NewSpan(spanImpl) + }, nil) + ctx, end = StartSpan(context.Background(), "TestStartSpan", tr, nil) + end(nil) + ctxTr := ctx.Value(shared.CtxWithTracingTracer{}) + require.NotNil(t, ctxTr) + _, ok := ctxTr.(tracing.Tracer) + require.True(t, ok) + require.True(t, startCalled) + require.True(t, endCalled) + + // with error + var spanStatus tracing.SpanStatus + var errStr string + tr = tracing.NewTracer(func(ctx context.Context, spanName string, options *tracing.SpanOptions) (context.Context, tracing.Span) { + spanImpl := tracing.SpanImpl{ + End: func() { endCalled = true }, + SetStatus: func(ss tracing.SpanStatus, s string) { + spanStatus = ss + errStr = s + }, + } + return ctx, tracing.NewSpan(spanImpl) + }, nil) + _, end = StartSpan(context.Background(), "TestStartSpan", tr, nil) + u, err := 
url.Parse("https://contoso.com") + require.NoError(t, err) + resp := &http.Response{ + Status: "the operation failed", + StatusCode: http.StatusBadRequest, + Body: io.NopCloser(strings.NewReader(`{ "error": { "code": "ErrorItFailed", "message": "it's not working" } }`)), + Request: &http.Request{ + Method: http.MethodGet, + URL: u, + }, + } + end(exported.NewResponseError(resp)) + require.EqualValues(t, tracing.SpanStatusError, spanStatus) + require.Contains(t, errStr, "*azcore.ResponseError") + require.Contains(t, errStr, "ERROR CODE: ErrorItFailed") +} diff --git a/sdk/azcore/runtime/poller.go b/sdk/azcore/runtime/poller.go index 3d029a3d15b7..e57ad240dc04 100644 --- a/sdk/azcore/runtime/poller.go +++ b/sdk/azcore/runtime/poller.go @@ -13,6 +13,8 @@ import ( "flag" "fmt" "net/http" + "reflect" + "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -23,6 +25,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" ) @@ -54,6 +57,9 @@ type NewPollerOptions[T any] struct { // Handler[T] contains a custom polling implementation. Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer } // NewPoller creates a Poller based on the provided initial response. 
@@ -70,6 +76,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol op: options.Handler, resp: resp, result: result, + tracer: options.Tracer, }, nil } @@ -110,6 +117,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol op: opr, resp: resp, result: result, + tracer: options.Tracer, }, nil } @@ -121,6 +129,9 @@ type NewPollerFromResumeTokenOptions[T any] struct { // Handler[T] contains a custom polling implementation. Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer } // NewPollerFromResumeToken creates a Poller from a resume token string. @@ -166,6 +177,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options return &Poller[T]{ op: opr, result: result, + tracer: options.Tracer, }, nil } @@ -188,6 +200,7 @@ type Poller[T any] struct { resp *http.Response err error result *T + tracer tracing.Tracer done bool } @@ -203,7 +216,7 @@ type PollUntilDoneOptions struct { // options: pass nil to accept the default values. // NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might // benefit from a shorter or longer duration. 
-func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { +func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (res T, err error) { if options == nil { options = &PollUntilDoneOptions{} } @@ -212,9 +225,13 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt cp.Frequency = 30 * time.Second } + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.PollUntilDone", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + // skip the floor check when executing tests so they don't take so long if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { - return *new(T), errors.New("polling frequency minimum is one second") + err = errors.New("polling frequency minimum is one second") + return } start := time.Now() @@ -226,22 +243,24 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt // initial check for a retry-after header existing on the initial response if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) - if err := shared.Delay(ctx, retryAfter); err != nil { + if err = shared.Delay(ctx, retryAfter); err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } } } // begin polling the endpoint until a terminal state is reached for { - resp, err := p.Poll(ctx) + var resp *http.Response + resp, err = p.Poll(ctx) if err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } if p.Done() { logPollUntilDoneExit("succeeded") - return p.Result(ctx) + res, err = p.Result(ctx) + return } d := cp.Frequency if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { @@ -252,7 +271,7 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt } if err = shared.Delay(ctx, d); err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } } } @@ 
-261,17 +280,22 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt // If Poll succeeds, the poller's state is updated and the HTTP response is returned. // If Poll fails, the poller's state is unmodified and the error is returned. // Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. -func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { +func (p *Poller[T]) Poll(ctx context.Context) (resp *http.Response, err error) { if p.Done() { // the LRO has reached a terminal state, don't poll again - return p.resp, nil + resp = p.resp + return } - resp, err := p.op.Poll(ctx) + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Poll", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err = p.op.Poll(ctx) if err != nil { - return nil, err + return } p.resp = resp - return p.resp, nil + return } // Done returns true if the LRO has reached a terminal state. @@ -284,31 +308,40 @@ func (p *Poller[T]) Done() bool { // If the LRO completed successfully, a populated instance of T is returned. // If the LRO failed or was canceled, an *azcore.ResponseError error is returned. // Calling this on an LRO in a non-terminal state will return an error. 
-func (p *Poller[T]) Result(ctx context.Context) (T, error) { +func (p *Poller[T]) Result(ctx context.Context) (res T, err error) { if !p.Done() { - return *new(T), errors.New("poller is in a non-terminal state") + err = errors.New("poller is in a non-terminal state") + return } if p.done { // the result has already been retrieved, return the cached value if p.err != nil { - return *new(T), p.err + err = p.err + return } - return *p.result, nil + res = *p.result + return } - err := p.op.Result(ctx, p.result) + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Result", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + err = p.op.Result(ctx, p.result) var respErr *exported.ResponseError if errors.As(err, &respErr) { // the LRO failed. record the error p.err = err } else if err != nil { // the call to Result failed, don't cache anything in this case - return *new(T), err + return } p.done = true if p.err != nil { - return *new(T), p.err + err = p.err + return } - return *p.result, nil + res = *p.result + return } // ResumeToken returns a value representing the poller that can be used to resume @@ -325,3 +358,22 @@ func (p *Poller[T]) ResumeToken() (string, error) { } return tk, err } + +// extracts the type name from the string returned from reflect.Value.Name() +func shortenTypeName(s string) string { + // the value is formatted as follows + // Poller[module/Package.Type].Method + // we want to shorten the generic type parameter string to Type + // anything we don't recognize will be left as-is + begin := strings.Index(s, "[") + end := strings.Index(s, "]") + if begin == -1 || end == -1 { + return s + } + + typeName := s[begin+1 : end] + if i := strings.LastIndex(typeName, "."); i > -1 { + typeName = typeName[i+1:] + } + return s[:begin+1] + typeName + s[end:] +} diff --git a/sdk/azcore/runtime/poller_test.go b/sdk/azcore/runtime/poller_test.go index 3ce04097a0e5..7811f0fe51ab 100644 --- 
a/sdk/azcore/runtime/poller_test.go +++ b/sdk/azcore/runtime/poller_test.go @@ -1176,3 +1176,17 @@ func TestNewPollerWithCustomHandler(t *testing.T) { require.NoError(t, err) require.EqualValues(t, "value", *result.Field) } + +func TestShortenPollerTypeName(t *testing.T) { + result := shortenTypeName("Poller[module/package.ClientOperationResponse].PollUntilDone") + require.EqualValues(t, "Poller[ClientOperationResponse].PollUntilDone", result) + + result = shortenTypeName("Poller[package.ClientOperationResponse].PollUntilDone") + require.EqualValues(t, "Poller[ClientOperationResponse].PollUntilDone", result) + + result = shortenTypeName("Poller[ClientOperationResponse].PollUntilDone") + require.EqualValues(t, "Poller[ClientOperationResponse].PollUntilDone", result) + + result = shortenTypeName("Poller.PollUntilDone") + require.EqualValues(t, "Poller.PollUntilDone", result) +} diff --git a/sdk/azcore/tracing/tracing.go b/sdk/azcore/tracing/tracing.go index 75f757cedd3b..f5157005f555 100644 --- a/sdk/azcore/tracing/tracing.go +++ b/sdk/azcore/tracing/tracing.go @@ -45,21 +45,28 @@ func (p Provider) NewTracer(name, version string) (tracer Tracer) { // TracerOptions contains the optional values when creating a Tracer. type TracerOptions struct { - // for future expansion + // SpanFromContext contains the implementation for the Tracer.SpanFromContext method. + SpanFromContext func(context.Context) (Span, bool) } // NewTracer creates a Tracer with the specified values. 
// - newSpanFn is the underlying implementation for creating Span instances // - options contains optional values; pass nil to accept the default value func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer { + if options == nil { + options = &TracerOptions{} + } return Tracer{ - newSpanFn: newSpanFn, + newSpanFn: newSpanFn, + spanFromContextFn: options.SpanFromContext, } } // Tracer is the factory that creates Span instances. type Tracer struct { - newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) + attrs []Attribute + newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) + spanFromContextFn func(ctx context.Context) (Span, bool) } // Start creates a new span and a context.Context that contains it. @@ -68,11 +75,37 @@ type Tracer struct { // - options contains optional values for the span, pass nil to accept any defaults func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) { if t.newSpanFn != nil { - return t.newSpanFn(ctx, spanName, options) + opts := SpanOptions{} + if options != nil { + opts = *options + } + opts.Attributes = append(opts.Attributes, t.attrs...) + return t.newSpanFn(ctx, spanName, &opts) } return ctx, Span{} } +// SetAttributes sets attrs to be applied to each Span. If a key from attrs +// already exists for an attribute of the Span it will be overwritten with +// the value contained in attrs. +func (t *Tracer) SetAttributes(attrs ...Attribute) { + t.attrs = append(t.attrs, attrs...) +} + +// Enabled returns true if this Tracer is capable of creating Spans. +func (t Tracer) Enabled() bool { + return t.newSpanFn != nil +} + +// SpanFromContext returns the Span associated with the current context. +// If the provided context has no Span, false is returned. 
+func (t Tracer) SpanFromContext(ctx context.Context) (Span, bool) { + if t.spanFromContextFn != nil { + return t.spanFromContextFn(ctx) + } + return Span{}, false +} + // SpanOptions contains optional settings for creating a span. type SpanOptions struct { // Kind indicates the kind of Span. diff --git a/sdk/azcore/tracing/tracing_test.go b/sdk/azcore/tracing/tracing_test.go index da04627e3167..5ca8b3f267de 100644 --- a/sdk/azcore/tracing/tracing_test.go +++ b/sdk/azcore/tracing/tracing_test.go @@ -17,6 +17,8 @@ func TestProviderZeroValues(t *testing.T) { pr := Provider{} tr := pr.NewTracer("name", "version") require.Zero(t, tr) + require.False(t, tr.Enabled()) + tr.SetAttributes() ctx, sp := tr.Start(context.Background(), "spanName", nil) require.Equal(t, context.Background(), ctx) require.Zero(t, sp) @@ -25,6 +27,9 @@ func TestProviderZeroValues(t *testing.T) { sp.End() sp.SetAttributes(Attribute{}) sp.SetStatus(SpanStatusError, "boom") + sp, ok := tr.SpanFromContext(ctx) + require.False(t, ok) + require.Zero(t, sp) } func TestProvider(t *testing.T) { @@ -33,6 +38,7 @@ func TestProvider(t *testing.T) { var endCalled bool var setAttributesCalled bool var setStatusCalled bool + var spanFromContextCalled bool pr := NewProvider(func(name, version string) Tracer { return NewTracer(func(context.Context, string, *SpanOptions) (context.Context, Span) { @@ -43,10 +49,23 @@ func TestProvider(t *testing.T) { SetAttributes: func(...Attribute) { setAttributesCalled = true }, SetStatus: func(SpanStatus, string) { setStatusCalled = true }, }) - }, nil) + }, &TracerOptions{ + SpanFromContext: func(context.Context) (Span, bool) { + spanFromContextCalled = true + return Span{}, true + }, + }) }, nil) tr := pr.NewTracer("name", "version") require.NotZero(t, tr) + require.True(t, tr.Enabled()) + sp, ok := tr.SpanFromContext(context.Background()) + require.True(t, ok) + require.Zero(t, sp) + tr.SetAttributes(Attribute{Key: "some", Value: "attribute"}) + require.Len(t, tr.attrs, 1) + 
require.EqualValues(t, tr.attrs[0].Key, "some") + require.EqualValues(t, tr.attrs[0].Value, "attribute") ctx, sp := tr.Start(context.Background(), "name", nil) require.NotEqual(t, context.Background(), ctx) @@ -62,4 +81,5 @@ func TestProvider(t *testing.T) { require.True(t, endCalled) require.True(t, setAttributesCalled) require.True(t, setStatusCalled) + require.True(t, spanFromContextCalled) } From 4a66b4f8eb8cdeb173a750c53069edbc74d3520d Mon Sep 17 00:00:00 2001 From: Charles Lowell <10964656+chlowell@users.noreply.github.com> Date: Tue, 9 May 2023 08:59:23 -0700 Subject: [PATCH 35/50] Restore ARM CAE support for azcore beta (#20657) This reverts commit 902097226ff3fe2fc6c3e7fc50d3478350253614. --- sdk/azcore/arm/runtime/policy_bearer_token.go | 48 ++++++++++++++++++- .../arm/runtime/policy_bearer_token_test.go | 8 ++-- sdk/azcore/internal/exported/exported.go | 4 ++ 3 files changed, 54 insertions(+), 6 deletions(-) diff --git a/sdk/azcore/arm/runtime/policy_bearer_token.go b/sdk/azcore/arm/runtime/policy_bearer_token.go index 07f15991eb27..83f1bf86e65e 100644 --- a/sdk/azcore/arm/runtime/policy_bearer_token.go +++ b/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -5,6 +5,7 @@ package runtime import ( "context" + "encoding/base64" "fmt" "net/http" "strings" @@ -63,11 +64,28 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok p.scopes = make([]string, len(opts.Scopes)) copy(p.scopes, opts.Scopes) p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ - AuthorizationHandler: azpolicy.AuthorizationHandler{OnRequest: p.onRequest}, + AuthorizationHandler: azpolicy.AuthorizationHandler{ + OnChallenge: p.onChallenge, + OnRequest: p.onRequest, + }, }) return p } +func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error { + challenge := res.Header.Get(shared.HeaderWWWAuthenticate) + claims, err := parseChallenge(challenge) + 
if err != nil { + // the challenge contains claims we can't parse + return err + } else if claims != "" { + // request a new token having the specified claims, send the request again + return authNZ(azpolicy.TokenRequestOptions{Claims: claims, Scopes: b.scopes}) + } + // auth challenge didn't include claims, so this is a simple authorization failure + return azruntime.NewResponseError(res) +} + // onRequest authorizes requests with one or more bearer tokens func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error { // authorize the request with a token for the primary tenant @@ -97,3 +115,31 @@ func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolic func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) { return b.btp.Do(req) } + +// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token +// that will satisfy conditional access policies. It returns a non-nil error when the given value contains +// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error. +func parseChallenge(wwwAuthenticate string) (string, error) { + claims := "" + var err error + for _, param := range strings.Split(wwwAuthenticate, ",") { + if _, after, found := strings.Cut(param, "claims="); found { + if claims != "" { + // The header contains multiple challenges, at least two of which specify claims. The specs allow this + // but it's unclear what a client should do in this case and there's as yet no concrete example of it. 
+ err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate) + break + } + // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded + claims = strings.Trim(after, `\"=`) + // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42" + if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil { + claims = string(b) + } else { + err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate) + break + } + } + } + return claims, err +} diff --git a/sdk/azcore/arm/runtime/policy_bearer_token_test.go b/sdk/azcore/arm/runtime/policy_bearer_token_test.go index 8357beb24c1b..1ab06ae00d76 100644 --- a/sdk/azcore/arm/runtime/policy_bearer_token_test.go +++ b/sdk/azcore/arm/runtime/policy_bearer_token_test.go @@ -203,7 +203,6 @@ func TestAuxiliaryTenants(t *testing.T) { } func TestBearerTokenPolicyChallengeParsing(t *testing.T) { - t.Skip("unskip this test after adding back CAE support") for _, test := range []struct { challenge, desc, expectedClaims string err error @@ -262,10 +261,9 @@ func TestBearerTokenPolicyChallengeParsing(t *testing.T) { cred := mockCredential{ getTokenImpl: func(ctx context.Context, actual azpolicy.TokenRequestOptions) (azcore.AccessToken, error) { calls += 1 - // TODO: uncomment after restoring TokenRequestOptions.Claims - // if calls == 2 && test.expectedClaims != "" { - // require.Equal(t, test.expectedClaims, actual.Claims) - // } + if calls == 2 && test.expectedClaims != "" { + require.Equal(t, test.expectedClaims, actual.Claims) + } return azcore.AccessToken{Token: "...", ExpiresOn: time.Now().Add(time.Hour).UTC()}, nil }, } diff --git a/sdk/azcore/internal/exported/exported.go b/sdk/azcore/internal/exported/exported.go index a1236b362526..c389cdcd8a5c 100644 --- a/sdk/azcore/internal/exported/exported.go +++ b/sdk/azcore/internal/exported/exported.go @@ -51,6 +51,10 @@ type AccessToken struct { // TokenRequestOptions contain 
specific parameter that may be used by credentials types when attempting to get a token. // Exported as policy.TokenRequestOptions. type TokenRequestOptions struct { + // Claims are any additional claims required for the token to satisfy a conditional access policy, such as a + // service may return in a claims challenge following an authorization failure. If a service returned the + // claims value base64 encoded, it must be decoded before setting this field. + Claims string // Scopes contains the list of permission scopes required for the token. Scopes []string From 7d4a3cbaadd5bf9f16322e1e2c673a633f146fb1 Mon Sep 17 00:00:00 2001 From: Charles Lowell <10964656+chlowell@users.noreply.github.com> Date: Tue, 9 May 2023 09:31:50 -0700 Subject: [PATCH 36/50] Upgrade to stable azcore (#20808) --- sdk/azidentity/go.mod | 4 ++-- sdk/azidentity/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/sdk/azidentity/go.mod b/sdk/azidentity/go.mod index b1158a670b2a..05eb779280ac 100644 --- a/sdk/azidentity/go.mod +++ b/sdk/azidentity/go.mod @@ -3,8 +3,8 @@ module github.com/Azure/azure-sdk-for-go/sdk/azidentity go 1.18 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0-beta.1 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/google/uuid v1.3.0 diff --git a/sdk/azidentity/go.sum b/sdk/azidentity/go.sum index a032d22d66c7..934934be4a7e 100644 --- a/sdk/azidentity/go.sum +++ b/sdk/azidentity/go.sum @@ -1,7 +1,7 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0-beta.1 h1:yLM4ZIC+NRvzwFGpXjUbf5FhPBVxJgmYXkjePgNAx64= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.5.0-beta.1/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= 
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= From 068c3be119351c54fd277dbc9475b40156733b7b Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Tue, 9 May 2023 09:34:41 -0700 Subject: [PATCH 37/50] Increment package version after release of data/azcosmos (#20807) --- sdk/data/azcosmos/CHANGELOG.md | 10 ++++++++++ sdk/data/azcosmos/version.go | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/sdk/data/azcosmos/CHANGELOG.md b/sdk/data/azcosmos/CHANGELOG.md index f4652730f572..180606f1b413 100644 --- a/sdk/data/azcosmos/CHANGELOG.md +++ b/sdk/data/azcosmos/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 0.3.6 (Unreleased) + +### Features Added + +### Breaking Changes + +### Bugs Fixed + +### Other Changes + ## 0.3.5 (2023-05-09) ### Features Added diff --git a/sdk/data/azcosmos/version.go b/sdk/data/azcosmos/version.go index 6773a23e3474..5ef57c68794a 100644 --- a/sdk/data/azcosmos/version.go +++ b/sdk/data/azcosmos/version.go @@ -4,4 +4,4 @@ 
package azcosmos // serviceLibVersion is the semantic version (see http://semver.org) of this module. -const serviceLibVersion = "v0.3.5" +const serviceLibVersion = "v0.3.6" From 8e0f66ea652af8e1390c3bb3f9a86ff364fdade7 Mon Sep 17 00:00:00 2001 From: Sourav Gupta <98318303+souravgupta-msft@users.noreply.github.com> Date: Tue, 9 May 2023 22:52:31 +0530 Subject: [PATCH 38/50] Updating changelog (#20810) --- sdk/storage/azfile/CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/storage/azfile/CHANGELOG.md b/sdk/storage/azfile/CHANGELOG.md index 04f97b45434f..b14cf748a116 100644 --- a/sdk/storage/azfile/CHANGELOG.md +++ b/sdk/storage/azfile/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 0.1.0 (Unreleased) +## 0.1.0 (2023-05-09) ### Features Added From ce926c4cadfc3d0604da8f404c25cde7b4c016ac Mon Sep 17 00:00:00 2001 From: Joel Hendrix Date: Tue, 9 May 2023 10:51:18 -0700 Subject: [PATCH 39/50] Add fake package to azcore (#20711) * Add fake package to azcore This is the supporting infrastructure for the generated SDK fakes. 
* fix doc comment --- sdk/azcore/CHANGELOG.md | 3 + sdk/azcore/fake/example_test.go | 146 +++++++ sdk/azcore/fake/fake.go | 378 ++++++++++++++++++ sdk/azcore/fake/fake_test.go | 331 +++++++++++++++ .../internal/exported/response_error.go | 3 +- .../internal/exported/response_error_test.go | 10 +- sdk/azcore/internal/pollers/fake/fake.go | 118 ++++++ sdk/azcore/internal/pollers/fake/fake_test.go | 185 +++++++++ sdk/azcore/internal/shared/constants.go | 2 + sdk/azcore/internal/shared/shared.go | 3 + sdk/azcore/runtime/poller.go | 9 +- sdk/azcore/runtime/poller_test.go | 51 ++- sdk/azcore/runtime/request.go | 3 + 13 files changed, 1223 insertions(+), 19 deletions(-) create mode 100644 sdk/azcore/fake/example_test.go create mode 100644 sdk/azcore/fake/fake.go create mode 100644 sdk/azcore/fake/fake_test.go create mode 100644 sdk/azcore/internal/pollers/fake/fake.go create mode 100644 sdk/azcore/internal/pollers/fake/fake_test.go diff --git a/sdk/azcore/CHANGELOG.md b/sdk/azcore/CHANGELOG.md index de9fd6de967c..c5c1daa74b55 100644 --- a/sdk/azcore/CHANGELOG.md +++ b/sdk/azcore/CHANGELOG.md @@ -11,6 +11,9 @@ * Added field `SpanFromContext` to `tracing.TracerOptions`. * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`. * Added supporting pipeline policies to include HTTP spans when creating clients. +* Added package `fake` to support generated fakes packages in SDKs. + * The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations. + * Added an internal fake poller implementation. ### Breaking Changes diff --git a/sdk/azcore/fake/example_test.go b/sdk/azcore/fake/example_test.go new file mode 100644 index 000000000000..1fbd12314ea3 --- /dev/null +++ b/sdk/azcore/fake/example_test.go @@ -0,0 +1,146 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package fake_test + +import ( + "errors" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/fake" +) + +// Widget is a hypothetical type used in the following examples. +type Widget struct { + ID int + Shape string +} + +// WidgetResponse is a hypothetical type used in the following examples. +type WidgetResponse struct { + Widget +} + +// WidgetListResponse is a hypothetical type used in the following examples. +type WidgetListResponse struct { + Widgets []Widget +} + +func ExampleNewTokenCredential() { + // create a fake azcore.TokenCredential + // the fake is used as the client credential during testing with fakes. + var _ azcore.TokenCredential = fake.NewTokenCredential() +} + +func ExampleTokenCredential_SetError() { + cred := fake.NewTokenCredential() + + // set an error to be returned during authentication + cred.SetError(errors.New("failed to authenticate")) +} + +func ExampleResponder() { + // for a hypothetical API GetNextWidget(context.Context) (WidgetResponse, error) + + // a Responder is used to build a scalar response + resp := fake.Responder[WidgetResponse]{} + + // here we set the instance of Widget the Responder is to return + resp.Set(WidgetResponse{ + Widget{ID: 123, Shape: "triangle"}, + }) + + // optional HTTP headers can also be included in the raw response + resp.SetHeader("custom-header1", "value1") + resp.SetHeader("custom-header2", "value2") +} + +func ExampleErrorResponder() { + // an ErrorResponder is used to build an error response + errResp := fake.ErrorResponder{} + + // use SetError to return a generic error + errResp.SetError(errors.New("the system is down")) + + // to return an *azcore.ResponseError, use SetResponseError + errResp.SetResponseError("ErrorCodeConflict", http.StatusConflict) + + // ErrorResponder returns a singular error, so calling Set* APIs overwrites any previous value +} + +func ExamplePagerResponder() { + // for a hypothetical API 
NewListWidgetsPager() *runtime.Pager[WidgetListResponse] + + // a PagerResponder is used to build a sequence of responses for a paged operation + pagerResp := fake.PagerResponder[WidgetListResponse]{} + + // use AddPage to add one or more pages to the response. + // responses are returned in the order in which they were added. + pagerResp.AddPage(WidgetListResponse{ + Widgets: []Widget{ + {ID: 1, Shape: "circle"}, + {ID: 2, Shape: "square"}, + {ID: 3, Shape: "triangle"}, + }, + }, nil) + pagerResp.AddPage(WidgetListResponse{ + Widgets: []Widget{ + {ID: 4, Shape: "rectangle"}, + {ID: 5, Shape: "rhombus"}, + }, + }, nil) + + // errors can also be included in the sequence of responses. + // this can be used to simulate an error during paging. + pagerResp.AddError(errors.New("network too slow")) + + pagerResp.AddPage(WidgetListResponse{ + Widgets: []Widget{ + {ID: 6, Shape: "trapezoid"}, + }, + }, nil) +} + +func ExamplePollerResponder() { + // for a hypothetical API BeginCreateWidget(context.Context) (*runtime.Poller[WidgetResponse], error) + + // a PollerResponder is used to build a sequence of responses for a long-running operation + pollerResp := fake.PollerResponder[WidgetResponse]{} + + // use AddNonTerminalResponse to add one or more non-terminal responses + // to the sequence of responses. this is to simulate polling on a LRO. + // non-terminal responses are optional. exclude them to simulate a LRO + // that synchronously completes. + pollerResp.AddNonTerminalResponse(nil) + + // non-terminal errors can also be included in the sequence of responses. + // use this to simulate an error during polling. + pollerResp.AddNonTerminalError(errors.New("flaky network")) + + // use SetTerminalResponse to successfully terminate the long-running operation. + // the provided value will be returned as the terminal response. 
+ pollerResp.SetTerminalResponse(WidgetResponse{ + Widget: Widget{ + ID: 987, + Shape: "dodecahedron", + }, + }) +} + +func ExamplePollerResponder_SetTerminalError() { + // for a hypothetical API BeginCreateWidget(context.Context) (*runtime.Poller[WidgetResponse], error) + + // a PollerResponder is used to build a sequence of responses for a long-running operation + pollerResp := fake.PollerResponder[WidgetResponse]{} + + // use SetTerminalError to terminate the long-running operation with an error. + // this returns an *azcore.ResponseError as the terminal response. + pollerResp.SetTerminalError("NoMoreWidgets", http.StatusBadRequest) + + // note that SetTerminalResponse and SetTerminalError are meant to be mutually exclusive. + // in the event that both are called, the result from SetTerminalError will be used. +} diff --git a/sdk/azcore/fake/fake.go b/sdk/azcore/fake/fake.go new file mode 100644 index 000000000000..1e5091248620 --- /dev/null +++ b/sdk/azcore/fake/fake.go @@ -0,0 +1,378 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package fake provides the building blocks for fake servers. +// This includes fakes for authentication, API responses, and more. +// +// Most of the content in this package is intended to be used by +// SDK authors in construction of their fakes. +package fake + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// NewTokenCredential creates an instance of the TokenCredential type. 
+func NewTokenCredential() *TokenCredential { + return &TokenCredential{} +} + +// TokenCredential is a fake credential that implements the azcore.TokenCredential interface. +type TokenCredential struct { + err error +} + +// SetError sets the specified error to be returned from GetToken(). +// Use this to simulate an error during authentication. +func (t *TokenCredential) SetError(err error) { + t.err = &nonRetriableError{err} +} + +// GetToken implements the azcore.TokenCredential for the TokenCredential type. +func (t *TokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + if t.err != nil { + return azcore.AccessToken{}, &nonRetriableError{t.err} + } + return azcore.AccessToken{Token: "fake_token", ExpiresOn: time.Now().Add(24 * time.Hour)}, nil +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// Responder represents a scalar response. +type Responder[T any] struct { + h http.Header + resp T +} + +// Set sets the specified value to be returned. +func (r *Responder[T]) Set(b T) { + r.resp = b +} + +// SetHeader sets the specified header key/value pairs to be returned. +// Call multiple times to set multiple headers. +func (r *Responder[T]) SetHeader(key, value string) { + if r.h == nil { + r.h = http.Header{} + } + r.h.Set(key, value) +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +// ErrorResponder represents a scalar error response. +type ErrorResponder struct { + err error +} + +// SetError sets the specified error to be returned. +// Use SetResponseError for returning an *azcore.ResponseError. +func (e *ErrorResponder) SetError(err error) { + e.err = &nonRetriableError{err: err} +} + +// SetResponseError sets an *azcore.ResponseError with the specified values to be returned. 
+func (e *ErrorResponder) SetResponseError(errorCode string, httpStatus int) {
+	e.err = &nonRetriableError{err: &azcore.ResponseError{ErrorCode: errorCode, StatusCode: httpStatus}}
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// PagerResponder represents a sequence of paged responses.
+// Responses are replayed in the order in which they were added.
+type PagerResponder[T any] struct {
+	pages []any
+}
+
+// AddPage adds a page to the sequence of responses.
+func (p *PagerResponder[T]) AddPage(page T, o *AddPageOptions) {
+	p.pages = append(p.pages, page)
+}
+
+// AddError adds an error to the sequence of responses.
+// The error is returned from the call to runtime.Pager[T].NextPage().
+func (p *PagerResponder[T]) AddError(err error) {
+	p.pages = append(p.pages, &nonRetriableError{err: err})
+}
+
+// AddResponseError adds an *azcore.ResponseError to the sequence of responses.
+// The error is returned from the call to runtime.Pager[T].NextPage().
+func (p *PagerResponder[T]) AddResponseError(errorCode string, httpStatus int) {
+	p.pages = append(p.pages, &nonRetriableError{err: &azcore.ResponseError{ErrorCode: errorCode, StatusCode: httpStatus}})
+}
+
+// AddPageOptions contains the optional values for PagerResponder[T].AddPage.
+type AddPageOptions struct {
+	// placeholder for future options
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// PollerResponder represents a sequence of responses for a long-running operation.
+// Any non-terminal responses are replayed in the order in which they were added.
+// The terminal response, success or error, is always the final response.
+type PollerResponder[T any] struct {
+	nonTermResps []nonTermResp
+	res *T
+	err *exported.ResponseError
+}
+
+// AddNonTerminalResponse adds a non-terminal response to the sequence of responses.
+func (p *PollerResponder[T]) AddNonTerminalResponse(o *AddNonTerminalResponseOptions) {
+	p.nonTermResps = append(p.nonTermResps, nonTermResp{status: "InProgress"})
+}
+
+// AddNonTerminalError adds a non-terminal error to the sequence of responses.
+// Use this to simulate an error during polling.
+func (p *PollerResponder[T]) AddNonTerminalError(err error) {
+	p.nonTermResps = append(p.nonTermResps, nonTermResp{err: err})
+}
+
+// SetTerminalResponse sets the provided value as the successful, terminal response.
+func (p *PollerResponder[T]) SetTerminalResponse(result T) {
+	p.res = &result
+}
+
+// SetTerminalError sets an *azcore.ResponseError with the specified values as the failed terminal response.
+func (p *PollerResponder[T]) SetTerminalError(errorCode string, httpStatus int) {
+	p.err = &exported.ResponseError{ErrorCode: errorCode, StatusCode: httpStatus}
+}
+
+// AddNonTerminalResponseOptions contains the optional values for PollerResponder[T].AddNonTerminalResponse.
+type AddNonTerminalResponseOptions struct {
+	// placeholder for future optional values
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// the following APIs are intended for use by fake servers
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// MarshalResponseAsJSON converts the body into JSON and returns it in a *http.Response.
+// This method is typically called by the fake server internals.
+func MarshalResponseAsJSON[T any](r Responder[T], req *http.Request) (*http.Response, error) {
+	body, err := json.Marshal(r.resp)
+	if err != nil {
+		return nil, &nonRetriableError{err}
+	}
+	resp := newResponse(http.StatusOK, "OK", req, string(body))
+	for key := range r.h {
+		resp.Header.Set(key, r.h.Get(key))
+	}
+	return resp, nil
+}
+
+// UnmarshalRequestAsJSON unmarshals the request body into an instance of T.
+// This method is typically called by the fake server internals. +func UnmarshalRequestAsJSON[T any](req *http.Request) (T, error) { + tt := *new(T) + body, err := io.ReadAll(req.Body) + if err != nil { + return tt, &nonRetriableError{err} + } + req.Body.Close() + if err = json.Unmarshal(body, &tt); err != nil { + err = &nonRetriableError{err} + } + return tt, err +} + +// GetError returns the error for this responder. +// This method is typically called by the fake server internals. +func GetError(e ErrorResponder, req *http.Request) error { + if e.err == nil { + return nil + } + + var respErr *azcore.ResponseError + if errors.As(e.err, &respErr) { + // fix up the raw response + respErr.RawResponse = newErrorResponse(respErr.ErrorCode, respErr.StatusCode, req) + } + return &nonRetriableError{e.err} +} + +// PagerResponderNext returns the next response in the sequence (a T or an error). +// This method is typically called by the fake server internals. +func PagerResponderNext[T any](p *PagerResponder[T], req *http.Request) (*http.Response, error) { + if len(p.pages) == 0 { + return nil, &nonRetriableError{errors.New("paged response has no pages")} + } + + page := p.pages[0] + p.pages = p.pages[1:] + + pageT, ok := page.(T) + if ok { + body, err := json.Marshal(pageT) + if err != nil { + return nil, &nonRetriableError{err} + } + return newResponse(http.StatusOK, "OK", req, string(body)), nil + } + + err := page.(error) + var respErr *azcore.ResponseError + if errors.As(err, &respErr) { + // fix up the raw response + respErr.RawResponse = newErrorResponse(respErr.ErrorCode, respErr.StatusCode, req) + } + return nil, &nonRetriableError{err} +} + +// PagerResponderMore returns true if there are more responses for consumption. +// This method is typically called by the fake server internals. 
+func PagerResponderMore[T any](p *PagerResponder[T]) bool { + return len(p.pages) > 0 +} + +type pageindex[T any] struct { + i int + page T +} + +// PagerResponderInjectNextLinks is used to populate the nextLink field. +// The inject callback is executed for every T in the sequence except for the last one. +// This method is typically called by the fake server internals. +func PagerResponderInjectNextLinks[T any](p *PagerResponder[T], req *http.Request, inject func(page *T, createLink func() string)) { + // first find all the actual pages in the list + pages := make([]pageindex[T], 0, len(p.pages)) + for i := range p.pages { + if pageT, ok := p.pages[i].(T); ok { + pages = append(pages, pageindex[T]{ + i: i, + page: pageT, + }) + } + } + + // now populate the next links + for i := range pages { + if i+1 == len(pages) { + // no nextLink for last page + break + } + + inject(&pages[i].page, func() string { + return fmt.Sprintf("%s://%s%s/page_%d", req.URL.Scheme, req.URL.Host, req.URL.Path, i+1) + }) + + // update the original slice with the modified page + p.pages[pages[i].i] = pages[i].page + } +} + +// PollerResponderMore returns true if there are more responses for consumption. +// This method is typically called by the fake server internals. +func PollerResponderMore[T any](p *PollerResponder[T]) bool { + return len(p.nonTermResps) > 0 || p.err != nil || p.res != nil +} + +// PollerResponderNext returns the next response in the sequence (a *http.Response or an error). +// This method is typically called by the fake server internals. 
+func PollerResponderNext[T any](p *PollerResponder[T], req *http.Request) (*http.Response, error) { + if len(p.nonTermResps) > 0 { + resp := p.nonTermResps[0] + p.nonTermResps = p.nonTermResps[1:] + + if resp.err != nil { + return nil, &nonRetriableError{resp.err} + } + + httpResp := newResponse(http.StatusOK, "OK", req, "") + httpResp.Header.Set(shared.HeaderFakePollerStatus, resp.status) + + if resp.retryAfter > 0 { + httpResp.Header.Add(shared.HeaderRetryAfter, strconv.Itoa(resp.retryAfter)) + } + + return httpResp, nil + } + + if p.err != nil { + err := p.err + err.RawResponse = newErrorResponse(p.err.ErrorCode, p.err.StatusCode, req) + p.err = nil + return nil, &nonRetriableError{err} + } else if p.res != nil { + body, err := json.Marshal(*p.res) + if err != nil { + return nil, &nonRetriableError{err} + } + p.res = nil + httpResp := newResponse(http.StatusOK, "OK", req, string(body)) + httpResp.Header.Set(shared.HeaderFakePollerStatus, "Succeeded") + return httpResp, nil + } else { + return nil, &nonRetriableError{fmt.Errorf("%T has no terminal response", p)} + } +} + +type nonTermResp struct { + status string + retryAfter int + err error +} + +func newResponse(statusCode int, status string, req *http.Request, body string) *http.Response { + resp := &http.Response{ + Body: http.NoBody, + Header: http.Header{}, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: req, + Status: status, + StatusCode: statusCode, + } + + if l := int64(len(body)); l > 0 { + resp.Header.Set(shared.HeaderContentType, shared.ContentTypeAppJSON) + resp.ContentLength = l + resp.Body = io.NopCloser(strings.NewReader(body)) + } + + return resp +} + +func newErrorResponse(errorCode string, statusCode int, req *http.Request) *http.Response { + resp := newResponse(statusCode, "Operation Failed", req, "") + resp.Header.Set(shared.HeaderXMSErrorCode, errorCode) + return resp +} + 
+///////////////////////////////////////////////////////////////////////////////////////////////////////////// + +type nonRetriableError struct { + err error +} + +func (p *nonRetriableError) Error() string { + return p.err.Error() +} + +func (*nonRetriableError) NonRetriable() { + // marker method +} + +func (p *nonRetriableError) Unwrap() error { + return p.err +} + +var _ errorinfo.NonRetriable = (*nonRetriableError)(nil) diff --git a/sdk/azcore/fake/fake_test.go b/sdk/azcore/fake/fake_test.go new file mode 100644 index 000000000000..c37bd0feb88f --- /dev/null +++ b/sdk/azcore/fake/fake_test.go @@ -0,0 +1,331 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package fake + +import ( + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" + "github.com/stretchr/testify/require" +) + +type widget struct { + Name string +} + +type widgets struct { + NextPage *string + Widgets []widget +} + +func TestNewTokenCredential(t *testing.T) { + cred := NewTokenCredential() + require.NotNil(t, cred) + + tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{}) + require.NoError(t, err) + require.NotZero(t, tk) + + myErr := errors.New("failed") + cred.SetError(myErr) + tk, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{}) + require.ErrorIs(t, err, myErr) + require.Zero(t, tk) +} + +func TestResponder(t *testing.T) { + respr := Responder[widget]{} + respr.Set(widget{Name: "foo"}) + respr.SetHeader("one", "1") + respr.SetHeader("two", "2") + + req := &http.Request{} + resp, err := MarshalResponseAsJSON(respr, req) + require.NoError(t, err) + 
require.NotNil(t, resp) + require.Equal(t, req, resp.Request) + require.Equal(t, "1", resp.Header.Get("one")) + require.Equal(t, "2", resp.Header.Get("two")) + + body, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + + w := widget{} + require.NoError(t, json.Unmarshal(body, &w)) + require.Equal(t, "foo", w.Name) +} + +type badWidget struct { + Count int +} + +func (badWidget) MarshalJSON() ([]byte, error) { + return nil, errors.New("failed") +} + +func (*badWidget) UnmarshalJSON([]byte) error { + return errors.New("failed") +} + +func TestResponderMarshallingError(t *testing.T) { + respr := Responder[badWidget]{} + + req := &http.Request{} + resp, err := MarshalResponseAsJSON(respr, req) + require.Error(t, err) + var nre errorinfo.NonRetriable + require.ErrorAs(t, err, &nre) + require.Nil(t, resp) +} + +func TestErrorResponder(t *testing.T) { + req := &http.Request{} + + errResp := ErrorResponder{} + require.NoError(t, GetError(errResp, req)) + + myErr := errors.New("failed") + errResp.SetError(myErr) + require.ErrorIs(t, GetError(errResp, req), myErr) + + errResp.SetResponseError("ErrorInvalidWidget", http.StatusBadRequest) + var respErr *azcore.ResponseError + require.ErrorAs(t, GetError(errResp, req), &respErr) + require.Equal(t, "ErrorInvalidWidget", respErr.ErrorCode) + require.Equal(t, http.StatusBadRequest, respErr.StatusCode) + require.NotNil(t, respErr.RawResponse) + require.Equal(t, req, respErr.RawResponse.Request) +} + +func unmarshal[T any](resp *http.Response) (T, error) { + var t T + body, err := io.ReadAll(resp.Body) + if err != nil { + return t, err + } + resp.Body.Close() + + err = json.Unmarshal(body, &t) + return t, err +} + +func TestPagerResponder(t *testing.T) { + req := &http.Request{URL: &url.URL{}} + req.URL.Scheme = "http" + req.URL.Host = "fakehost.org" + req.URL.Path = "/lister" + + pagerResp := PagerResponder[widgets]{} + + require.False(t, PagerResponderMore(&pagerResp)) + resp, err := 
PagerResponderNext(&pagerResp, req) + var nre errorinfo.NonRetriable + require.ErrorAs(t, err, &nre) + require.Nil(t, resp) + + pagerResp.AddError(errors.New("one")) + pagerResp.AddPage(widgets{ + Widgets: []widget{ + {Name: "foo"}, + {Name: "bar"}, + }, + }, nil) + pagerResp.AddError(errors.New("two")) + pagerResp.AddPage(widgets{ + Widgets: []widget{ + {Name: "baz"}, + }, + }, nil) + pagerResp.AddResponseError("ErrorPagerBlewUp", http.StatusBadRequest) + + PagerResponderInjectNextLinks(&pagerResp, req, func(p *widgets, create func() string) { + p.NextPage = to.Ptr(create()) + }) + + iterations := 0 + for PagerResponderMore(&pagerResp) { + resp, err := PagerResponderNext(&pagerResp, req) + switch iterations { + case 0: + require.Error(t, err) + require.Equal(t, "one", err.Error()) + require.Nil(t, resp) + case 1: + require.NoError(t, err) + require.NotNil(t, resp) + page, err := unmarshal[widgets](resp) + require.NoError(t, err) + require.NotNil(t, page.NextPage) + require.Equal(t, []widget{{Name: "foo"}, {Name: "bar"}}, page.Widgets) + case 2: + require.Error(t, err) + require.Equal(t, "two", err.Error()) + require.Nil(t, resp) + case 3: + require.NoError(t, err) + require.NotNil(t, resp) + page, err := unmarshal[widgets](resp) + require.NoError(t, err) + require.Nil(t, page.NextPage) + require.Equal(t, []widget{{Name: "baz"}}, page.Widgets) + case 4: + require.Error(t, err) + var respErr *azcore.ResponseError + require.ErrorAs(t, err, &respErr) + require.Equal(t, "ErrorPagerBlewUp", respErr.ErrorCode) + require.Equal(t, http.StatusBadRequest, respErr.StatusCode) + require.Nil(t, resp) + default: + t.Fatalf("unexpected case %d", iterations) + } + iterations++ + } + require.Equal(t, 5, iterations) +} + +func TestPollerResponder(t *testing.T) { + req := &http.Request{URL: &url.URL{}} + req.URL.Scheme = "http" + req.URL.Host = "fakehost.org" + req.URL.Path = "/lro" + + pollerResp := PollerResponder[widget]{} + + require.False(t, PollerResponderMore(&pollerResp)) + 
resp, err := PollerResponderNext(&pollerResp, req) + var nre errorinfo.NonRetriable + require.ErrorAs(t, err, &nre) + require.Nil(t, resp) + + pollerResp.AddNonTerminalResponse(nil) + pollerResp.AddNonTerminalError(errors.New("network glitch")) + pollerResp.AddNonTerminalResponse(nil) + pollerResp.SetTerminalResponse(widget{Name: "dodo"}) + + iterations := 0 + for PollerResponderMore(&pollerResp) { + resp, err := PollerResponderNext(&pollerResp, req) + switch iterations { + case 0: + require.NoError(t, err) + require.NotNil(t, resp) + case 1: + require.Error(t, err) + require.Nil(t, resp) + case 2: + require.NoError(t, err) + require.NotNil(t, resp) + case 3: + require.NoError(t, err) + require.NotNil(t, resp) + w, err := unmarshal[widget](resp) + require.NoError(t, err) + require.Equal(t, "dodo", w.Name) + default: + t.Fatalf("unexpected case %d", iterations) + } + iterations++ + } + require.Equal(t, 4, iterations) +} + +func TestPollerResponderTerminalFailure(t *testing.T) { + req := &http.Request{URL: &url.URL{}} + req.URL.Scheme = "http" + req.URL.Host = "fakehost.org" + req.URL.Path = "/lro" + + pollerResp := PollerResponder[widget]{} + + require.False(t, PollerResponderMore(&pollerResp)) + resp, err := PollerResponderNext(&pollerResp, req) + var nre errorinfo.NonRetriable + require.ErrorAs(t, err, &nre) + require.Nil(t, resp) + + pollerResp.AddNonTerminalError(errors.New("network glitch")) + pollerResp.AddNonTerminalResponse(nil) + pollerResp.SetTerminalError("ErrorConflictingOperation", http.StatusConflict) + + iterations := 0 + for PollerResponderMore(&pollerResp) { + resp, err := PollerResponderNext(&pollerResp, req) + switch iterations { + case 0: + require.Error(t, err) + require.Nil(t, resp) + case 1: + require.NoError(t, err) + require.NotNil(t, resp) + case 2: + require.Error(t, err) + require.Nil(t, resp) + var respErr *azcore.ResponseError + require.ErrorAs(t, err, &respErr) + require.Equal(t, "ErrorConflictingOperation", respErr.ErrorCode) + 
require.Equal(t, http.StatusConflict, respErr.StatusCode) + require.Equal(t, req, respErr.RawResponse.Request) + default: + t.Fatalf("unexpected case %d", iterations) + } + iterations++ + } + require.Equal(t, 3, iterations) +} + +func TestUnmarshalRequestAsJSON(t *testing.T) { + req, err := http.NewRequest(http.MethodPut, "https://foo.bar/baz", strings.NewReader(`{"Name": "foo"}`)) + require.NoError(t, err) + require.NotNil(t, req) + + w, err := UnmarshalRequestAsJSON[widget](req) + require.NoError(t, err) + require.Equal(t, "foo", w.Name) +} + +func TestUnmarshalRequestAsJSONReadFailure(t *testing.T) { + req, err := http.NewRequest(http.MethodPut, "https://foo.bar/baz", &readFailer{}) + require.NoError(t, err) + require.NotNil(t, req) + + w, err := UnmarshalRequestAsJSON[widget](req) + require.Error(t, err) + require.Zero(t, w) +} + +func TestUnmarshalRequestAsJSONUnmarshalFailure(t *testing.T) { + req, err := http.NewRequest(http.MethodPut, "https://foo.bar/baz", strings.NewReader(`{"Name": "foo"}`)) + require.NoError(t, err) + require.NotNil(t, req) + + w, err := UnmarshalRequestAsJSON[badWidget](req) + require.Error(t, err) + require.Zero(t, w) +} + +type readFailer struct { + wrapped io.ReadCloser +} + +func (r *readFailer) Close() error { + return r.wrapped.Close() +} + +func (r *readFailer) Read(p []byte) (int, error) { + return 0, errors.New("mock read failure") +} diff --git a/sdk/azcore/internal/exported/response_error.go b/sdk/azcore/internal/exported/response_error.go index 7df2f88c1c1a..76a8c068d143 100644 --- a/sdk/azcore/internal/exported/response_error.go +++ b/sdk/azcore/internal/exported/response_error.go @@ -13,6 +13,7 @@ import ( "net/http" "regexp" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -25,7 +26,7 @@ func NewResponseError(resp *http.Response) error { } // prefer the error code in the response header - if ec := 
resp.Header.Get("x-ms-error-code"); ec != "" { + if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { respErr.ErrorCode = ec return respErr } diff --git a/sdk/azcore/internal/exported/response_error_test.go b/sdk/azcore/internal/exported/response_error_test.go index 7b4a44150ef1..97c8bc4d6a4c 100644 --- a/sdk/azcore/internal/exported/response_error_test.go +++ b/sdk/azcore/internal/exported/response_error_test.go @@ -13,6 +13,8 @@ import ( "net/url" "strings" "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" ) func TestNewResponseErrorNoBodyNoErrorCode(t *testing.T) { @@ -59,7 +61,7 @@ func TestNewResponseErrorNoBody(t *testing.T) { } respHeader := http.Header{} const errorCode = "ErrorTooManyCheats" - respHeader.Set("x-ms-error-code", errorCode) + respHeader.Set(shared.HeaderXMSErrorCode, errorCode) err = NewResponseError(&http.Response{ Status: "the system is down", StatusCode: http.StatusInternalServerError, @@ -136,7 +138,7 @@ func TestNewResponseErrorPreferErrorCodeHeader(t *testing.T) { t.Fatal(err) } respHeader := http.Header{} - respHeader.Set("x-ms-error-code", "ErrorTooManyCheats") + respHeader.Set(shared.HeaderXMSErrorCode, "ErrorTooManyCheats") err = NewResponseError(&http.Response{ Status: "the system is down", StatusCode: http.StatusInternalServerError, @@ -317,7 +319,7 @@ func TestNewResponseErrorErrorCodeHeaderXML(t *testing.T) { t.Fatal(err) } respHeader := http.Header{} - respHeader.Set("x-ms-error-code", "ContainerAlreadyExists") + respHeader.Set(shared.HeaderXMSErrorCode, "ContainerAlreadyExists") err = NewResponseError(&http.Response{ Status: "the system is down", StatusCode: http.StatusInternalServerError, @@ -354,7 +356,7 @@ func TestNewResponseErrorErrorCodeHeaderXMLWithNamespace(t *testing.T) { t.Fatal(err) } respHeader := http.Header{} - respHeader.Set("x-ms-error-code", "ContainerAlreadyExists") + respHeader.Set(shared.HeaderXMSErrorCode, "ContainerAlreadyExists") err = 
NewResponseError(&http.Response{ Status: "the system is down", StatusCode: http.StatusInternalServerError, diff --git a/sdk/azcore/internal/pollers/fake/fake.go b/sdk/azcore/internal/pollers/fake/fake.go new file mode 100644 index 000000000000..15adbee29f09 --- /dev/null +++ b/sdk/azcore/internal/pollers/fake/fake.go @@ -0,0 +1,118 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package fake + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is a fake. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderFakePollerStatus) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["fakeURL"] + return ok +} + +// Poller is an LRO poller that uses the Core-Fake-Poller pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The API name from CtxAPINameKey + APIName string `json:"apiName"` + + // The URL from Core-Fake-Poller header. + FakeURL string `json:"fakeURL"` + + // The LRO's current state. + FakeStatus string `json:"status"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. 
+func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Core-Fake-Poller poller.") + return &Poller[T]{pl: pl}, nil + } + + log.Write(log.EventLRO, "Using Core-Fake-Poller poller.") + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return nil, errors.New("response is missing Fake-Poller-Status header") + } + + ctxVal := resp.Request.Context().Value(shared.CtxAPINameKey{}) + if ctxVal == nil { + return nil, errors.New("missing value for CtxAPINameKey") + } + + apiName, ok := ctxVal.(string) + if !ok { + return nil, fmt.Errorf("expected string for CtxAPINameKey, the type was %T", ctxVal) + } + + p := &Poller[T]{ + pl: pl, + resp: resp, + APIName: apiName, + FakeURL: fmt.Sprintf("%s://%s%s/get/fake/status", resp.Request.URL.Scheme, resp.Request.URL.Host, resp.Request.URL.Path), + FakeStatus: fakeStatus, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.FakeStatus) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + ctx = context.WithValue(ctx, shared.CtxAPINameKey{}, p.APIName) + err := pollers.PollHelper(ctx, p.FakeURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return "", errors.New("response is missing Fake-Poller-Status header") + } + p.resp = resp + p.FakeStatus = fakeStatus + return p.FakeStatus, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.FakeStatus) { + return exported.NewResponseError(p.resp) + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), out) +} diff --git a/sdk/azcore/internal/pollers/fake/fake_test.go b/sdk/azcore/internal/pollers/fake/fake_test.go new file mode 100644 index 000000000000..0a32d6dd3a86 --- /dev/null +++ b/sdk/azcore/internal/pollers/fake/fake_test.go @@ -0,0 +1,185 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package fake + +import ( + "context" + "io" + "net/http" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" + "github.com/stretchr/testify/require" +) + +const ( + fakePollingURL = "https://foo.bar.baz/status" + fakeResourceURL = "https://foo.bar.baz/resource" +) + +func initialResponse(ctx context.Context, method string, resp io.Reader) *http.Response { + req, err := http.NewRequestWithContext(ctx, method, fakeResourceURL, nil) + if err != nil { + panic(err) + } + return &http.Response{ + Body: io.NopCloser(resp), + Header: http.Header{}, + Request: req, + } +} + +func TestApplicable(t *testing.T) { + resp := &http.Response{ + Header: http.Header{}, + } + require.False(t, Applicable(resp), "missing Fake-Poller-Status should not be applicable") + resp.Header.Set(shared.HeaderFakePollerStatus, fakePollingURL) + require.True(t, Applicable(resp), "having Fake-Poller-Status should be applicable") +} + +func TestCanResume(t *testing.T) { + token := map[string]interface{}{} + require.False(t, CanResume(token)) + token["fakeURL"] = fakePollingURL + require.True(t, CanResume(token)) +} + +func TestNew(t *testing.T) { + fp, err := New[struct{}](exported.Pipeline{}, nil) + require.NoError(t, err) + require.Empty(t, fp.FakeStatus) + + fp, err = New[struct{}](exported.Pipeline{}, &http.Response{Header: http.Header{}}) + require.Error(t, err) + require.Nil(t, fp) + + resp := initialResponse(context.Background(), http.MethodPut, http.NoBody) + resp.Header.Set(shared.HeaderFakePollerStatus, "faking") + fp, err = New[struct{}](exported.Pipeline{}, resp) + require.Error(t, err) + require.Nil(t, fp) + + resp = initialResponse(context.WithValue(context.Background(), shared.CtxAPINameKey{}, 123), http.MethodPut, http.NoBody) + resp.Header.Set(shared.HeaderFakePollerStatus, "faking") + fp, 
err = New[struct{}](exported.Pipeline{}, resp) + require.Error(t, err) + require.Nil(t, fp) + + resp = initialResponse(context.WithValue(context.Background(), shared.CtxAPINameKey{}, "FakeAPI"), http.MethodPut, http.NoBody) + resp.Header.Set(shared.HeaderFakePollerStatus, "faking") + fp, err = New[struct{}](exported.Pipeline{}, resp) + require.NoError(t, err) + require.NotNil(t, fp) + require.False(t, fp.Done()) +} + +func TestSynchronousCompletion(t *testing.T) { + resp := initialResponse(context.WithValue(context.Background(), shared.CtxAPINameKey{}, "FakeAPI"), http.MethodPut, http.NoBody) + resp.StatusCode = http.StatusNoContent + resp.Header.Set(shared.HeaderFakePollerStatus, poller.StatusSucceeded) + fp, err := New[struct{}](exported.Pipeline{}, resp) + require.NoError(t, err) + require.Equal(t, poller.StatusSucceeded, fp.FakeStatus) + require.True(t, fp.Done()) + require.NoError(t, fp.Result(context.Background(), nil)) +} + +type widget struct { + Shape string `json:"shape"` +} + +func TestPollSucceeded(t *testing.T) { + pollCtx := context.WithValue(context.Background(), shared.CtxAPINameKey{}, "FakeAPI") + resp := initialResponse(pollCtx, http.MethodPatch, http.NoBody) + resp.Header.Set(shared.HeaderFakePollerStatus, poller.StatusInProgress) + poller, err := New[widget](exported.NewPipeline(shared.TransportFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{shared.HeaderFakePollerStatus: []string{"Succeeded"}}, + Body: io.NopCloser(strings.NewReader(`{ "shape": "triangle" }`)), + }, nil + })), resp) + require.NoError(t, err) + require.False(t, poller.Done()) + resp, err = poller.Poll(pollCtx) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + require.True(t, poller.Done()) + var result widget + require.NoError(t, poller.Result(context.Background(), &result)) + require.EqualValues(t, "triangle", result.Shape) +} + +func TestPollError(t *testing.T) { + 
pollCtx := context.WithValue(context.Background(), shared.CtxAPINameKey{}, "FakeAPI") + resp := initialResponse(pollCtx, http.MethodPatch, http.NoBody) + resp.Header.Set(shared.HeaderFakePollerStatus, poller.StatusInProgress) + poller, err := New[widget](exported.NewPipeline(shared.TransportFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusNotFound, + Header: http.Header{shared.HeaderFakePollerStatus: []string{poller.StatusFailed}}, + Body: io.NopCloser(strings.NewReader(`{ "error": { "code": "NotFound", "message": "the item doesn't exist" } }`)), + }, nil + })), resp) + require.NoError(t, err) + require.False(t, poller.Done()) + resp, err = poller.Poll(pollCtx) + require.Error(t, err) + require.Nil(t, resp) + var respErr *exported.ResponseError + require.ErrorAs(t, err, &respErr) + require.Equal(t, http.StatusNotFound, respErr.StatusCode) + require.False(t, poller.Done()) + var result widget + require.Error(t, poller.Result(context.Background(), &result)) + require.ErrorAs(t, err, &respErr) +} + +func TestPollFailed(t *testing.T) { + pollCtx := context.WithValue(context.Background(), shared.CtxAPINameKey{}, "FakeAPI") + resp := initialResponse(pollCtx, http.MethodPatch, http.NoBody) + resp.Header.Set(shared.HeaderFakePollerStatus, poller.StatusInProgress) + poller, err := New[widget](exported.NewPipeline(shared.TransportFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{shared.HeaderFakePollerStatus: []string{poller.StatusFailed}}, + Body: io.NopCloser(strings.NewReader(`{ "error": { "code": "FakeFailure", "message": "couldn't do the thing" } }`)), + }, nil + })), resp) + require.NoError(t, err) + require.False(t, poller.Done()) + resp, err = poller.Poll(pollCtx) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + require.True(t, poller.Done()) + var result widget + var respErr *exported.ResponseError + 
err = poller.Result(context.Background(), &result) + require.Error(t, err) + require.ErrorAs(t, err, &respErr) +} + +func TestPollErrorNoHeader(t *testing.T) { + pollCtx := context.WithValue(context.Background(), shared.CtxAPINameKey{}, "FakeAPI") + resp := initialResponse(pollCtx, http.MethodPatch, http.NoBody) + resp.Header.Set(shared.HeaderFakePollerStatus, poller.StatusInProgress) + poller, err := New[widget](exported.NewPipeline(shared.TransportFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusNotFound, + Body: io.NopCloser(strings.NewReader(`{ "error": { "code": "NotFound", "message": "the item doesn't exist" } }`)), + }, nil + })), resp) + require.NoError(t, err) + require.False(t, poller.Done()) + resp, err = poller.Poll(pollCtx) + require.Error(t, err) + require.Nil(t, resp) +} diff --git a/sdk/azcore/internal/shared/constants.go b/sdk/azcore/internal/shared/constants.go index dcd3d098b339..01f802537e4c 100644 --- a/sdk/azcore/internal/shared/constants.go +++ b/sdk/azcore/internal/shared/constants.go @@ -17,6 +17,7 @@ const ( HeaderAzureAsync = "Azure-AsyncOperation" HeaderContentLength = "Content-Length" HeaderContentType = "Content-Type" + HeaderFakePollerStatus = "Fake-Poller-Status" HeaderLocation = "Location" HeaderOperationLocation = "Operation-Location" HeaderRetryAfter = "Retry-After" @@ -24,6 +25,7 @@ const ( HeaderWWWAuthenticate = "WWW-Authenticate" HeaderXMSClientRequestID = "x-ms-client-request-id" HeaderXMSRequestID = "x-ms-request-id" + HeaderXMSErrorCode = "x-ms-error-code" ) const BearerTokenPrefix = "Bearer " diff --git a/sdk/azcore/internal/shared/shared.go b/sdk/azcore/internal/shared/shared.go index 9bd054b3643e..69153854c77a 100644 --- a/sdk/azcore/internal/shared/shared.go +++ b/sdk/azcore/internal/shared/shared.go @@ -29,6 +29,9 @@ type CtxIncludeResponseKey struct{} // CtxWithTracingTracer is used as a context key for adding/retrieving tracing.Tracer. 
type CtxWithTracingTracer struct{} +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey struct{} + // Delay waits for the duration to elapse or the context to be cancelled. func Delay(ctx context.Context, delay time.Duration) error { select { diff --git a/sdk/azcore/runtime/poller.go b/sdk/azcore/runtime/poller.go index e57ad240dc04..c373f68962e3 100644 --- a/sdk/azcore/runtime/poller.go +++ b/sdk/azcore/runtime/poller.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" @@ -90,7 +91,9 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol // determine the polling method var opr PollingHandler[T] var err error - if async.Applicable(resp) { + if fake.Applicable(resp) { + opr, err = fake.New[T](pl, resp) + } else if async.Applicable(resp) { // async poller must be checked first as it can also have a location header opr, err = async.New[T](pl, resp, options.FinalStateVia) } else if op.Applicable(resp) { @@ -158,7 +161,9 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options opr := options.Handler // now rehydrate the poller based on the encoded poller type - if opr != nil { + if fake.CanResume(asJSON) { + opr, _ = fake.New[T](pl, nil) + } else if opr != nil { log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) } else if async.CanResume(asJSON) { opr, _ = async.New[T](pl, nil, "") diff --git a/sdk/azcore/runtime/poller_test.go b/sdk/azcore/runtime/poller_test.go 
index 7811f0fe51ab..a16f99f667df 100644 --- a/sdk/azcore/runtime/poller_test.go +++ b/sdk/azcore/runtime/poller_test.go @@ -23,9 +23,11 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/internal/mock" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" "github.com/stretchr/testify/require" ) @@ -771,8 +773,8 @@ func getPipeline(srv *mock.Server) Pipeline { ) } -func initialResponse(method, u string, resp io.Reader) (*http.Response, mock.TrackedClose) { - req, err := http.NewRequest(method, u, nil) +func initialResponse(ctx context.Context, method, u string, resp io.Reader) (*http.Response, mock.TrackedClose) { + req, err := http.NewRequestWithContext(ctx, method, u, nil) if err != nil { panic(err) } @@ -795,7 +797,7 @@ func TestNewPollerAsync(t *testing.T) { srv.AppendResponse(mock.WithBody([]byte(statusInProgress))) srv.AppendResponse(mock.WithBody([]byte(statusSucceeded))) srv.AppendResponse(mock.WithBody([]byte(successResp))) - resp, closed := initialResponse(http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) + resp, closed := initialResponse(context.Background(), http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) resp.Header.Set(shared.HeaderAzureAsync, srv.URL()) resp.StatusCode = http.StatusCreated pl := getPipeline(srv) @@ -838,7 +840,7 @@ func TestNewPollerBody(t *testing.T) { defer close() srv.AppendResponse(mock.WithBody([]byte(provStateUpdating)), mock.WithHeader("Retry-After", "1")) srv.AppendResponse(mock.WithBody([]byte(provStateSucceeded))) - resp, closed := 
initialResponse(http.MethodPatch, srv.URL(), strings.NewReader(provStateStarted)) + resp, closed := initialResponse(context.Background(), http.MethodPatch, srv.URL(), strings.NewReader(provStateStarted)) resp.StatusCode = http.StatusCreated pl := getPipeline(srv) poller, err := NewPoller[mockType](resp, pl, nil) @@ -874,7 +876,7 @@ func TestNewPollerInitialRetryAfter(t *testing.T) { srv.AppendResponse(mock.WithBody([]byte(statusInProgress))) srv.AppendResponse(mock.WithBody([]byte(statusSucceeded))) srv.AppendResponse(mock.WithBody([]byte(successResp))) - resp, closed := initialResponse(http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) + resp, closed := initialResponse(context.Background(), http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) resp.Header.Set(shared.HeaderAzureAsync, srv.URL()) resp.Header.Set("Retry-After", "1") resp.StatusCode = http.StatusCreated @@ -903,7 +905,7 @@ func TestNewPollerCanceled(t *testing.T) { defer close() srv.AppendResponse(mock.WithBody([]byte(statusInProgress))) srv.AppendResponse(mock.WithBody([]byte(statusCanceled)), mock.WithStatusCode(http.StatusOK)) - resp, closed := initialResponse(http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) + resp, closed := initialResponse(context.Background(), http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) resp.Header.Set(shared.HeaderAzureAsync, srv.URL()) resp.StatusCode = http.StatusCreated pl := getPipeline(srv) @@ -941,7 +943,7 @@ func TestNewPollerFailed(t *testing.T) { srv, close := mock.NewServer() defer close() srv.AppendResponse(mock.WithBody([]byte(provStateFailed))) - resp, closed := initialResponse(http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) + resp, closed := initialResponse(context.Background(), http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) resp.Header.Set(shared.HeaderAzureAsync, srv.URL()) resp.StatusCode = http.StatusCreated pl := getPipeline(srv) @@ -966,7 +968,7 @@ func 
TestNewPollerFailedWithError(t *testing.T) { defer close() srv.AppendResponse(mock.WithBody([]byte(statusInProgress))) srv.AppendResponse(mock.WithStatusCode(http.StatusBadRequest)) - resp, closed := initialResponse(http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) + resp, closed := initialResponse(context.Background(), http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) resp.Header.Set(shared.HeaderAzureAsync, srv.URL()) resp.StatusCode = http.StatusCreated pl := getPipeline(srv) @@ -991,7 +993,7 @@ func TestNewPollerSuccessNoContent(t *testing.T) { defer close() srv.AppendResponse(mock.WithBody([]byte(provStateUpdating))) srv.AppendResponse(mock.WithStatusCode(http.StatusNoContent)) - resp, closed := initialResponse(http.MethodPatch, srv.URL(), strings.NewReader(provStateStarted)) + resp, closed := initialResponse(context.Background(), http.MethodPatch, srv.URL(), strings.NewReader(provStateStarted)) resp.StatusCode = http.StatusCreated pl := getPipeline(srv) poller, err := NewPoller[mockType](resp, pl, nil) @@ -1024,7 +1026,7 @@ func TestNewPollerSuccessNoContent(t *testing.T) { func TestNewPollerFail202NoHeaders(t *testing.T) { srv, close := mock.NewServer() defer close() - resp, closed := initialResponse(http.MethodDelete, srv.URL(), http.NoBody) + resp, closed := initialResponse(context.Background(), http.MethodDelete, srv.URL(), http.NoBody) resp.StatusCode = http.StatusAccepted pl := getPipeline(srv) poller, err := NewPoller[mockType](resp, pl, nil) @@ -1049,7 +1051,7 @@ func TestNewPollerWithResponseType(t *testing.T) { defer close() srv.AppendResponse(mock.WithBody([]byte(provStateUpdating)), mock.WithHeader("Retry-After", "1")) srv.AppendResponse(mock.WithBody([]byte(provStateSucceeded))) - resp, closed := initialResponse(http.MethodPatch, srv.URL(), strings.NewReader(provStateStarted)) + resp, closed := initialResponse(context.Background(), http.MethodPatch, srv.URL(), strings.NewReader(provStateStarted)) resp.StatusCode = 
http.StatusCreated pl := getPipeline(srv) poller, err := NewPoller[preconstructedMockType](resp, pl, nil) @@ -1146,7 +1148,7 @@ func TestNewPollerWithCustomHandler(t *testing.T) { srv.AppendResponse(mock.WithBody([]byte(statusInProgress))) srv.AppendResponse(mock.WithBody([]byte(statusSucceeded))) srv.AppendResponse(mock.WithBody([]byte(successResp))) - resp, closed := initialResponse(http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) + resp, closed := initialResponse(context.Background(), http.MethodPut, srv.URL(), strings.NewReader(provStateStarted)) resp.Header.Set(shared.HeaderAzureAsync, srv.URL()) resp.StatusCode = http.StatusCreated pl := getPipeline(srv) @@ -1190,3 +1192,28 @@ func TestShortenPollerTypeName(t *testing.T) { result = shortenTypeName("Poller.PollUntilDone") require.EqualValues(t, "Poller.PollUntilDone", result) } + +func TestNewFakePoller(t *testing.T) { + srv, close := mock.NewServer() + defer close() + srv.AppendResponse(mock.WithHeader(shared.HeaderFakePollerStatus, "FakePollerInProgress")) + srv.AppendResponse(mock.WithHeader(shared.HeaderFakePollerStatus, poller.StatusSucceeded), mock.WithStatusCode(http.StatusNoContent)) + pollCtx := context.WithValue(context.Background(), shared.CtxAPINameKey{}, "FakeAPI") + resp, closed := initialResponse(pollCtx, http.MethodPatch, srv.URL(), http.NoBody) + resp.StatusCode = http.StatusCreated + resp.Header.Set(shared.HeaderFakePollerStatus, "FakePollerInProgress") + pl := getPipeline(srv) + poller, err := NewPoller[mockType](resp, pl, nil) + require.NoError(t, err) + require.True(t, closed()) + if pt := typeOfOpField(poller); pt != reflect.TypeOf((*fake.Poller[mockType])(nil)) { + t.Fatalf("unexpected poller type %s", pt.String()) + } + tk, err := poller.ResumeToken() + require.NoError(t, err) + poller, err = NewPollerFromResumeToken[mockType](tk, pl, nil) + require.NoError(t, err) + result, err := poller.PollUntilDone(context.Background(), &PollUntilDoneOptions{Frequency: 
time.Millisecond}) + require.NoError(t, err) + require.Nil(t, result.Field) +} diff --git a/sdk/azcore/runtime/request.go b/sdk/azcore/runtime/request.go index 98e00718488e..cdbf8bde60b0 100644 --- a/sdk/azcore/runtime/request.go +++ b/sdk/azcore/runtime/request.go @@ -169,6 +169,9 @@ func SkipBodyDownload(req *policy.Request) { req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) } +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey = shared.CtxAPINameKey + // returns a clone of the object graph pointed to by v, omitting values of all read-only // fields. if there are no read-only fields in the object graph, no clone is created. func cloneWithoutReadOnlyFields(v interface{}) interface{} { From 1a145c57a44f6083fc652863450d2a13cc450fb1 Mon Sep 17 00:00:00 2001 From: siminsavani-msft <77068571+siminsavani-msft@users.noreply.github.com> Date: Tue, 9 May 2023 14:01:56 -0400 Subject: [PATCH 40/50] Updating CHANGELOG.md (#20809) --- sdk/storage/azblob/CHANGELOG.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/sdk/storage/azblob/CHANGELOG.md b/sdk/storage/azblob/CHANGELOG.md index 26b05273b1c5..ba42a8d03b8d 100644 --- a/sdk/storage/azblob/CHANGELOG.md +++ b/sdk/storage/azblob/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 1.0.1 (Unreleased) +## 1.0.1 (2023-05-09) ### Features Added @@ -11,15 +11,11 @@ * Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL * Added support for tag permission in Container SAS. -### Breaking Changes - ### Bugs Fixed * Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). * Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. 
-### Other Changes - ## 1.0.0 (2023-02-07) ### Features Added From 90dfc5c8a680e47a43960213cd981071450f610e Mon Sep 17 00:00:00 2001 From: Tamer Sherif <69483382+tasherif-msft@users.noreply.github.com> Date: Tue, 9 May 2023 11:07:46 -0700 Subject: [PATCH 41/50] changelog (#20811) --- sdk/storage/azqueue/CHANGELOG.md | 9 +++------ sdk/storage/azqueue/internal/exported/version.go | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/sdk/storage/azqueue/CHANGELOG.md b/sdk/storage/azqueue/CHANGELOG.md index 81127f99b819..af4b82990e16 100644 --- a/sdk/storage/azqueue/CHANGELOG.md +++ b/sdk/storage/azqueue/CHANGELOG.md @@ -1,14 +1,11 @@ ## Release History -### 0.1.1 (Unreleased) +### 1.0.0 (2023-05-09) -#### Features Added - -#### Breaking Changes +### Features Added -#### Bugs Fixed +* This is the initial GA release of the `azqueue` library -#### Other Changes ### 0.1.0 (2023-02-15) diff --git a/sdk/storage/azqueue/internal/exported/version.go b/sdk/storage/azqueue/internal/exported/version.go index 9ad8cee51fad..abf66d142113 100644 --- a/sdk/storage/azqueue/internal/exported/version.go +++ b/sdk/storage/azqueue/internal/exported/version.go @@ -8,5 +8,5 @@ package exported const ( ModuleName = "azqueue" - ModuleVersion = "v0.1.1" + ModuleVersion = "v1.0.0" ) From c7eda59142b90ed5e66f40d116b109130a8ea290 Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Tue, 9 May 2023 11:26:12 -0700 Subject: [PATCH 42/50] Increment package version after release of storage/azfile (#20813) --- sdk/storage/azfile/CHANGELOG.md | 10 ++++++++++ sdk/storage/azfile/internal/exported/version.go | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/sdk/storage/azfile/CHANGELOG.md b/sdk/storage/azfile/CHANGELOG.md index b14cf748a116..0ae008e2971f 100644 --- a/sdk/storage/azfile/CHANGELOG.md +++ b/sdk/storage/azfile/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 0.1.1 (Unreleased) + +### Features Added + +### Breaking 
Changes + +### Bugs Fixed + +### Other Changes + ## 0.1.0 (2023-05-09) ### Features Added diff --git a/sdk/storage/azfile/internal/exported/version.go b/sdk/storage/azfile/internal/exported/version.go index 8e130784dbf2..fbe1575df39c 100644 --- a/sdk/storage/azfile/internal/exported/version.go +++ b/sdk/storage/azfile/internal/exported/version.go @@ -8,5 +8,5 @@ package exported const ( ModuleName = "azfile" - ModuleVersion = "v0.1.0" + ModuleVersion = "v0.1.1" ) From 7fac0b5c23d00d601aef3c8439246a2be683556f Mon Sep 17 00:00:00 2001 From: siminsavani-msft <77068571+siminsavani-msft@users.noreply.github.com> Date: Tue, 9 May 2023 14:59:58 -0400 Subject: [PATCH 43/50] Update changelog (azblob) (#20815) * Updating CHANGELOG.md * Update the changelog with correct version --- sdk/storage/azblob/CHANGELOG.md | 2 +- sdk/storage/azblob/internal/exported/version.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/storage/azblob/CHANGELOG.md b/sdk/storage/azblob/CHANGELOG.md index ba42a8d03b8d..786f837b0870 100644 --- a/sdk/storage/azblob/CHANGELOG.md +++ b/sdk/storage/azblob/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 1.0.1 (2023-05-09) +## 1.1.0-beta.1 (2023-05-09) ### Features Added diff --git a/sdk/storage/azblob/internal/exported/version.go b/sdk/storage/azblob/internal/exported/version.go index 19c92b607d90..7954108e6d27 100644 --- a/sdk/storage/azblob/internal/exported/version.go +++ b/sdk/storage/azblob/internal/exported/version.go @@ -8,5 +8,5 @@ package exported const ( ModuleName = "azblob" - ModuleVersion = "v1.0.1" + ModuleVersion = "v1.1.0-beta.1" ) From 498a2eff41ecc251950868d9810c61229f5a9050 Mon Sep 17 00:00:00 2001 From: gracewilcox <43627800+gracewilcox@users.noreply.github.com> Date: Tue, 9 May 2023 12:53:11 -0700 Subject: [PATCH 44/50] [azquery] migration guide (#20742) * migration guide * Charles feedback * Richard feedback --------- Co-authored-by: Charles Lowell <10964656+chlowell@users.noreply.github.com> --- 
sdk/monitor/azquery/MIGRATION.md | 177 +++++++++++++++++++++ 1 file changed, 177 insertions(+) create mode 100644 sdk/monitor/azquery/MIGRATION.md diff --git a/sdk/monitor/azquery/MIGRATION.md b/sdk/monitor/azquery/MIGRATION.md new file mode 100644 index 000000000000..caf6c162586b --- /dev/null +++ b/sdk/monitor/azquery/MIGRATION.md @@ -0,0 +1,177 @@ +# Guide to migrate from `operationalinsights` and monitor `insights` to `azquery` + +This guide is intended to assist in the migration to the `azquery` module. `azquery` allows users to retrieve log and metric data from Azure Monitor. + +## Package consolidation + + Azure Monitor allows users to retrieve telemetry data for their Azure resources. The two main data categories for Azure Monitor are [metrics](https://learn.microsoft.com/azure/azure-monitor/essentials/data-platform-metrics) and [logs](https://learn.microsoft.com/azure/azure-monitor/logs/data-platform-logs). + + There have been a number of [terminology](https://learn.microsoft.com/azure/azure-monitor/terminology) changes for Azure Monitor over the years which resulted in the operations being spread over multiple packages. For Go, metrics methods were contained in `github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt//insights` and logs methods resided in `github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights`. + +The new `azquery` module condenses metrics and logs functionality into one package for simpler access. The `azquery` module contains two clients: [LogsClient](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#LogsClient) and [MetricsClient](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#MetricsClient). + +Transitioning to a single package has resulted in a number of name changes, as detailed below.
+ +### Log name changes + +| `operationalinsights` | `azquery` | +| ----------- | ----------- | +| QueryClient.Execute | LogsClient.QueryWorkspace | +| MetadataClient.Get and MetadataClient.Post | N/A | + +The `azquery` module does not contain the `MetadataClient`. For that functionality, please use the old [`operationalinsights`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights) module or [file an issue in our github repo](https://github.com/Azure/azure-sdk-for-go/issues), so we can prioritize adding it to `azquery`. + +### Metrics name changes + +| `insights` | `azquery` | +| ----------- | ----------- | +| MetricsClient.List | MetricsClient.QueryResource | +| MetricDefinitionsClient.List | MetricsClient.NewListDefinitionsPager | +| MetricNamespacesClient.List | MetricsClient.NewListNamespacesPager | + +## Query Logs + +### `operationalinsights` +```go +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/operationalinsights/v1/operationalinsights" + "github.com/Azure/go-autorest/autorest" +) + +// create the client +client := operationalinsights.NewQueryClient() +client.Authorizer = autorest.NewAPIKeyAuthorizerWithHeaders(map[string]interface{}{ + "x-api-key": "DEMO_KEY", +}) + +// execute the query +query := "" +timespan := "2023-12-25/2023-12-26" + +res, err := client.Execute(context.TODO(), "DEMO_WORKSPACE", operationalinsights.QueryBody{Query: &query, Timespan: ×pan}) +if err != nil { + //TODO: handle error +} +``` + +### `azquery` + +Compared to previous versions, querying logs with the new `azquery` module is clearer and simpler. There are a number of name changes for clarity, like how the old `Execute` method is now [`QueryWorkspace`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#LogsClient.QueryWorkspace). In addition, there is improved time support. 
Before if a user added a timespan over which to query the request, it had to be a string constructed in the ISO8601 interval format. Users frequently made mistakes when constructing this string. With the new `QueryWorkspace` method, the type of timespan has been changed from a string to a new type named [`TimeInterval`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery#TimeInterval). `TimeInterval` has a constructor that allows users to take advantage of Go's time package, allowing easier creation. + +```go +import ( + "context" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery" +) + +// create the logs client +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + //TODO: handle error +} +client, err := azquery.NewLogsClient(cred, nil) +if err != nil { + //TODO: handle error +} + +// execute the logs query +res, err := client.QueryWorkspace(context.TODO(), workspaceID, + azquery.Body{ + Query: to.Ptr(""), + Timespan: to.Ptr(azquery.NewTimeInterval(time.Date(2022, 12, 25, 0, 0, 0, 0, time.UTC), time.Date(2022, 12, 25, 12, 0, 0, 0, time.UTC))), + }, + nil) +if err != nil { + //TODO: handle error +} +if res.Error != nil { + //TODO: handle partial error +} +``` + +## Query Metrics + +### `insights` + +```go +import ( + "context" + + "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2022-10-01-preview/insights" + "github.com/Azure/go-autorest/autorest/azure/auth" +) + +// create the client +client := insights.NewMetricsClient("") +authorizer, err := auth.NewAuthorizerFromCLI() +if err == nil { + client.Authorizer = authorizer +} + +// execute the query +timespan := "2023-12-25/2023-12-26" +interval := "PT1M" +metricnames := "" +aggregation := "Average" +top := 3 +orderby := "Average asc" +filter := "BlobType eq '*'" +resultType := 
insights.ResultTypeData +metricnamespace := "Microsoft.Storage/storageAccounts/blobServices" + +res, err := client.List(context.TODO(), resourceURI, timespan, &interval, metricnames, aggregation, &top, orderby, filter, resultType, metricnamespace) +if err != nil { + //TODO: handle error +} +``` + +### `azquery` + +The main difference between the old and new methods of querying metrics is in the naming. The new method has an updated convention for clarity. For example, the old name of the method was simply `List`. Now, it's `QueryResource`. There have also been a number of casing fixes and the query options have been moved into the options struct. + +```go +import ( + "context" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/monitor/azquery" +) + +// create the metrics client +cred, err := azidentity.NewDefaultAzureCredential(nil) +if err != nil { + //TODO: handle error +} +client, err := azquery.NewMetricsClient(cred, nil) +if err != nil { + //TODO: handle error +} + +// execute the metrics query +res, err := metricsClient.QueryResource(context.TODO(), resourceURI, + &azquery.MetricsClientQueryResourceOptions{ + Timespan: to.Ptr(azquery.NewTimeInterval(time.Date(2022, 12, 25, 0, 0, 0, 0, time.UTC), time.Date(2022, 12, 25, 12, 0, 0, 0, time.UTC))), + Interval: to.Ptr("PT1M"), + MetricNames: nil, + Aggregation: to.SliceOfPtrs(azquery.AggregationTypeAverage, azquery.AggregationTypeCount), + Top: to.Ptr[int32](3), + OrderBy: to.Ptr("Average asc"), + Filter: to.Ptr("BlobType eq '*'"), + ResultType: nil, + MetricNamespace: to.Ptr("Microsoft.Storage/storageAccounts/blobServices"), + }) +if err != nil { + //TODO: handle error +} +``` + + + From ccb967e39850344a3c0d4bd7c6e0c50e359ecf65 Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Tue, 9 May 2023 15:19:07 -0700 Subject: [PATCH 45/50] Increment 
package version after release of monitor/azquery (#20820) --- sdk/monitor/azquery/CHANGELOG.md | 10 ++++++++++ sdk/monitor/azquery/version.go | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/sdk/monitor/azquery/CHANGELOG.md b/sdk/monitor/azquery/CHANGELOG.md index 5f27242e7455..fb6606ebc3e5 100644 --- a/sdk/monitor/azquery/CHANGELOG.md +++ b/sdk/monitor/azquery/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.1.1 (Unreleased) + +### Features Added + +### Breaking Changes + +### Bugs Fixed + +### Other Changes + ## 1.1.0 (2023-05-09) ### Other Changes diff --git a/sdk/monitor/azquery/version.go b/sdk/monitor/azquery/version.go index 59ca11d98ff1..3e826b868deb 100644 --- a/sdk/monitor/azquery/version.go +++ b/sdk/monitor/azquery/version.go @@ -8,5 +8,5 @@ package azquery const ( moduleName = "azquery" - version = "v1.1.0" + version = "v1.1.1" ) From f4e6a2296543f1dfb5974c1997b71355b9ea380f Mon Sep 17 00:00:00 2001 From: gracewilcox <43627800+gracewilcox@users.noreply.github.com> Date: Wed, 10 May 2023 15:00:08 -0700 Subject: [PATCH 46/50] [keyvault] prep for release (#20819) * prep for release * perf tests * update date --- sdk/security/keyvault/azadmin/CHANGELOG.md | 11 +-- sdk/security/keyvault/azadmin/go.mod | 16 ++-- sdk/security/keyvault/azadmin/go.sum | 32 +++---- .../keyvault/azadmin/internal/version.go | 2 +- .../keyvault/azcertificates/CHANGELOG.md | 9 +- sdk/security/keyvault/azcertificates/go.mod | 24 +++--- sdk/security/keyvault/azcertificates/go.sum | 54 ++++++------ .../azcertificates/testdata/perf/go.mod | 19 ++-- .../azcertificates/testdata/perf/go.sum | 86 ++++--------------- .../keyvault/azcertificates/version.go | 2 +- sdk/security/keyvault/azkeys/CHANGELOG.md | 9 +- sdk/security/keyvault/azkeys/go.mod | 24 +++--- sdk/security/keyvault/azkeys/go.sum | 54 ++++++------ .../keyvault/azkeys/testdata/perf/go.mod | 19 ++-- .../keyvault/azkeys/testdata/perf/go.sum | 86 ++++--------------- sdk/security/keyvault/azkeys/version.go | 
2 +- sdk/security/keyvault/azsecrets/CHANGELOG.md | 9 +- sdk/security/keyvault/azsecrets/go.mod | 24 +++--- sdk/security/keyvault/azsecrets/go.sum | 54 ++++++------ .../keyvault/azsecrets/testdata/perf/go.mod | 19 ++-- .../keyvault/azsecrets/testdata/perf/go.sum | 49 ++++------- sdk/security/keyvault/azsecrets/version.go | 2 +- 22 files changed, 233 insertions(+), 373 deletions(-) diff --git a/sdk/security/keyvault/azadmin/CHANGELOG.md b/sdk/security/keyvault/azadmin/CHANGELOG.md index 9801ea4bf6d3..2ce73e268b87 100644 --- a/sdk/security/keyvault/azadmin/CHANGELOG.md +++ b/sdk/security/keyvault/azadmin/CHANGELOG.md @@ -1,14 +1,9 @@ ## Release History -### 0.2.1 (Unreleased) +## 1.0.0 (2023-05-11) -#### Features Added - -#### Breaking Changes - -#### Bugs Fixed - -#### Other Changes +### Other Changes +* Updated dependencies ### 0.2.0 (2023-04-13) diff --git a/sdk/security/keyvault/azadmin/go.mod b/sdk/security/keyvault/azadmin/go.mod index 0401754c0546..3d736680146e 100644 --- a/sdk/security/keyvault/azadmin/go.mod +++ b/sdk/security/keyvault/azadmin/go.mod @@ -3,8 +3,8 @@ module github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azadmin go 1.18 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v0.11.0 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 @@ -13,17 +13,17 @@ require ( ) require ( - github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dnaeon/go-vcr v1.2.0 // indirect - 
github.com/golang-jwt/jwt/v4 v4.4.3 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sdk/security/keyvault/azadmin/go.sum b/sdk/security/keyvault/azadmin/go.sum index eda99c5307f2..d443d38dbf8c 100644 --- a/sdk/security/keyvault/azadmin/go.sum +++ b/sdk/security/keyvault/azadmin/go.sum @@ -1,22 +1,22 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1 h1:T8quHYlUGyb/oqtSTwqlCr1ilJHrDv+ZtpSfo+hm1BU= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.1/go.mod h1:gLa1CL2RNE4s7M3yopJ/p0iq5DdY6Yv5ZUt9MTRZOQM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v0.11.0 h1:efdSCWUBtk2FUUIlEfZhRQyVIM3Ts8lA3vaF18amnwo= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v0.11.0/go.mod h1:LLJYu/UhJ8GpH5PtJc06RmJ1gJ5mPCSc1PiDMW17MHM= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA= -github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1 h1:oPdPEZFSbl7oSPEAIPMPBMUmiL+mqgzBJwM/9qYcwNg= -github.com/AzureAD/microsoft-authentication-library-for-go v0.8.1/go.mod h1:4qFor3D/HDsvBME35Xy9rwW9DecL+M2sNw1ybjPtwA0= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= -github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= 
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= @@ -33,15 +33,15 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/sdk/security/keyvault/azadmin/internal/version.go b/sdk/security/keyvault/azadmin/internal/version.go index a247ad64aa14..3476033e8db1 100644 --- a/sdk/security/keyvault/azadmin/internal/version.go +++ b/sdk/security/keyvault/azadmin/internal/version.go @@ -8,5 +8,5 @@ package internal const ( ModuleName = "azadmin" - Version = "v0.2.1" + Version = "v1.0.0" ) diff --git a/sdk/security/keyvault/azcertificates/CHANGELOG.md b/sdk/security/keyvault/azcertificates/CHANGELOG.md index 21b91691dd46..f78a18bc4269 100644 --- a/sdk/security/keyvault/azcertificates/CHANGELOG.md +++ b/sdk/security/keyvault/azcertificates/CHANGELOG.md @@ -1,14 +1,9 @@ # Release History -## 0.10.1 (Unreleased) - -### Features Added - -### Breaking Changes - -### Bugs Fixed +## 1.0.0 (2023-05-11) ### Other Changes +* Updated dependencies ## 0.10.0 (2023-04-13) diff --git a/sdk/security/keyvault/azcertificates/go.mod b/sdk/security/keyvault/azcertificates/go.mod index 518df8ee4700..bc7cc5e9b251 100644 --- a/sdk/security/keyvault/azcertificates/go.mod +++ b/sdk/security/keyvault/azcertificates/go.mod @@ -3,26 +3,26 @@ module github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azcertificates go 1.18 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 github.com/stretchr/testify v1.8.2 ) require ( - github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dnaeon/go-vcr v1.1.0 // indirect - github.com/golang-jwt/jwt v3.2.1+incompatible // indirect - github.com/google/uuid v1.1.1 // indirect + github.com/dnaeon/go-vcr v1.2.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sdk/security/keyvault/azcertificates/go.sum b/sdk/security/keyvault/azcertificates/go.sum index 020402e414b9..51e117ba2909 100644 --- a/sdk/security/keyvault/azcertificates/go.sum +++ b/sdk/security/keyvault/azcertificates/go.sum @@ -1,29 +1,27 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= 
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= 
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -33,18 +31,18 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod 
h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/security/keyvault/azcertificates/testdata/perf/go.mod b/sdk/security/keyvault/azcertificates/testdata/perf/go.mod index a9463126402d..c729f56244b1 100644 --- a/sdk/security/keyvault/azcertificates/testdata/perf/go.mod +++ b/sdk/security/keyvault/azcertificates/testdata/perf/go.mod @@ -5,22 +5,21 @@ go 1.18 replace github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azcertificates => ../.. 
require ( - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 - github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azcertificates v0.3.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azcertificates v0.10.0 ) require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect - github.com/golang-jwt/jwt v3.2.1+incompatible // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/google/uuid v1.3.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect ) diff --git a/sdk/security/keyvault/azcertificates/testdata/perf/go.sum b/sdk/security/keyvault/azcertificates/testdata/perf/go.sum index 7a20a947a37e..9e3409f5ceb6 100644 --- a/sdk/security/keyvault/azcertificates/testdata/perf/go.sum +++ b/sdk/security/keyvault/azcertificates/testdata/perf/go.sum @@ -1,81 +1,33 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.23.0 h1:D7l5jspkc4kwBYRWoZE4DQnu6LVpLwDsMZjBKS4wZLQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.23.0/go.mod 
h1:w5pDIZuawUmY3Bj4tVx3Xb8KS96ToB0j315w9rqpAg0= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.14.0 h1:NVS/4LOQfkBpk+B1VopIzv1ptmYeEskA8w/3K/w7vjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.14.0/go.mod h1:RG0cZndeZM17StwohYclmcXSr4oOJ8b1I5hB8llIc6Y= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2 h1:Px2KVERcYEg2Lv25AqC2hVr0xUWaq94wuEObLIkYzmA= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2/go.mod h1:CdSJQNNzZhCkwDaV27XV1w48ZBPtxe7mlrZAsPNxD5g= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod 
h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= 
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net 
v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/sdk/security/keyvault/azcertificates/version.go b/sdk/security/keyvault/azcertificates/version.go index fcd766917ec2..b3d53e37bc29 100644 --- a/sdk/security/keyvault/azcertificates/version.go +++ b/sdk/security/keyvault/azcertificates/version.go @@ -8,5 +8,5 @@ package azcertificates const ( moduleName = "azcertificates" - version = "v0.10.1" + version = "v1.0.0" ) diff --git a/sdk/security/keyvault/azkeys/CHANGELOG.md b/sdk/security/keyvault/azkeys/CHANGELOG.md index 83a4f8a1d69a..1bf58ce5eb8d 100644 --- a/sdk/security/keyvault/azkeys/CHANGELOG.md +++ b/sdk/security/keyvault/azkeys/CHANGELOG.md @@ -1,14 +1,9 @@ # Release History -## 0.11.1 (Unreleased) - -### Features Added - -### Breaking Changes - -### Bugs Fixed +## 1.0.0 (2023-05-11) ### Other Changes +* Updated dependencies ## 0.11.0 (2023-04-13) diff --git a/sdk/security/keyvault/azkeys/go.mod b/sdk/security/keyvault/azkeys/go.mod index db3f1705323f..06b59e94fcbc 100644 --- a/sdk/security/keyvault/azkeys/go.mod +++ b/sdk/security/keyvault/azkeys/go.mod @@ -3,26 +3,26 @@ module github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys go 1.18 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 github.com/stretchr/testify v1.8.2 ) require ( - github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dnaeon/go-vcr v1.1.0 // 
indirect - github.com/golang-jwt/jwt v3.2.1+incompatible // indirect - github.com/google/uuid v1.1.1 // indirect + github.com/dnaeon/go-vcr v1.2.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sdk/security/keyvault/azkeys/go.sum b/sdk/security/keyvault/azkeys/go.sum index f4c11a230c77..51e117ba2909 100644 --- a/sdk/security/keyvault/azkeys/go.sum +++ b/sdk/security/keyvault/azkeys/go.sum @@ -1,29 +1,27 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 
h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= 
-github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -33,18 +31,18 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text 
v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/security/keyvault/azkeys/testdata/perf/go.mod b/sdk/security/keyvault/azkeys/testdata/perf/go.mod index cd071a65aae0..e146d87a3b81 100644 --- a/sdk/security/keyvault/azkeys/testdata/perf/go.mod +++ b/sdk/security/keyvault/azkeys/testdata/perf/go.mod @@ -5,22 +5,21 @@ go 1.18 replace github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys => ../.. 
require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 - github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v0.5.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v0.11.0 ) require ( github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect - github.com/golang-jwt/jwt v3.2.1+incompatible // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/google/uuid v1.3.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect ) diff --git a/sdk/security/keyvault/azkeys/testdata/perf/go.sum b/sdk/security/keyvault/azkeys/testdata/perf/go.sum index 1c8487835094..9e3409f5ceb6 100644 --- a/sdk/security/keyvault/azkeys/testdata/perf/go.sum +++ b/sdk/security/keyvault/azkeys/testdata/perf/go.sum @@ -1,81 +1,33 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.23.0 h1:D7l5jspkc4kwBYRWoZE4DQnu6LVpLwDsMZjBKS4wZLQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v0.23.0/go.mod h1:w5pDIZuawUmY3Bj4tVx3Xb8KS96ToB0j315w9rqpAg0= 
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.14.0 h1:NVS/4LOQfkBpk+B1VopIzv1ptmYeEskA8w/3K/w7vjo= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.14.0/go.mod h1:RG0cZndeZM17StwohYclmcXSr4oOJ8b1I5hB8llIc6Y= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2 h1:Px2KVERcYEg2Lv25AqC2hVr0xUWaq94wuEObLIkYzmA= -github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2/go.mod h1:CdSJQNNzZhCkwDaV27XV1w48ZBPtxe7mlrZAsPNxD5g= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= 
github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= -github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/golang-jwt/jwt/v4 v4.5.0 
h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 h1:pLI5jrR7OSLijeIDcmRxNmw2api+jEfxLoykJVice/E= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod 
h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/sdk/security/keyvault/azkeys/version.go b/sdk/security/keyvault/azkeys/version.go index e43ed94345f2..d38632ecc25a 100644 --- a/sdk/security/keyvault/azkeys/version.go +++ b/sdk/security/keyvault/azkeys/version.go @@ -8,5 +8,5 @@ package azkeys const ( moduleName = "azkeys" - version = "v0.11.1" + version = "v1.0.0" ) diff --git a/sdk/security/keyvault/azsecrets/CHANGELOG.md b/sdk/security/keyvault/azsecrets/CHANGELOG.md index 3219c8bfe2fe..6336429cfa50 100644 --- a/sdk/security/keyvault/azsecrets/CHANGELOG.md +++ b/sdk/security/keyvault/azsecrets/CHANGELOG.md @@ -1,14 +1,9 @@ # Release History -## 0.13.1 (Unreleased) - -### Features Added - -### Breaking Changes - -### Bugs Fixed +## 1.0.0 (2023-05-11) ### Other Changes +* Updated dependencies ## 0.13.0 (2023-04-13) diff --git a/sdk/security/keyvault/azsecrets/go.mod b/sdk/security/keyvault/azsecrets/go.mod index 48c9dc66a6f3..6d224d94bb44 100644 --- a/sdk/security/keyvault/azsecrets/go.mod +++ b/sdk/security/keyvault/azsecrets/go.mod @@ -3,26 +3,26 @@ module github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets go 1.18 require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 github.com/stretchr/testify v1.8.2 ) require ( - github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dnaeon/go-vcr v1.1.0 // indirect - 
github.com/golang-jwt/jwt v3.2.1+incompatible // indirect - github.com/google/uuid v1.1.1 // indirect + github.com/dnaeon/go-vcr v1.2.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sdk/security/keyvault/azsecrets/go.sum b/sdk/security/keyvault/azsecrets/go.sum index f4c11a230c77..51e117ba2909 100644 --- a/sdk/security/keyvault/azsecrets/go.sum +++ b/sdk/security/keyvault/azsecrets/go.sum @@ -1,29 +1,27 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 
h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= 
-github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -33,18 +31,18 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text 
v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/sdk/security/keyvault/azsecrets/testdata/perf/go.mod b/sdk/security/keyvault/azsecrets/testdata/perf/go.mod index ae9a57f86f2f..90e8e662fed8 100644 --- a/sdk/security/keyvault/azsecrets/testdata/perf/go.mod +++ b/sdk/security/keyvault/azsecrets/testdata/perf/go.mod @@ -5,22 +5,21 @@ go 1.18 replace github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets => ../.. 
require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 - github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 - github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v0.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 + github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v0.13.0 ) require ( github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 // indirect - github.com/golang-jwt/jwt v3.2.1+incompatible // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/google/uuid v1.3.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/net v0.8.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/text v0.8.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/text v0.9.0 // indirect ) diff --git a/sdk/security/keyvault/azsecrets/testdata/perf/go.sum b/sdk/security/keyvault/azsecrets/testdata/perf/go.sum index 8f4b16fb9eaf..9e3409f5ceb6 100644 --- a/sdk/security/keyvault/azsecrets/testdata/perf/go.sum +++ b/sdk/security/keyvault/azsecrets/testdata/perf/go.sum @@ -1,48 +1,33 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M= 
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0 h1:T028gtTPiYt/RMUfs8nVsAL7FDQrfLlrm/NnRG/zcC4= github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v0.8.0/go.mod h1:cw4zVQgBby0Z5f2v0itn6se2dDP17nTjbZFXW5uPyHA= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE= -github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1/go.mod h1:Vt9sXTKwMyGcOxSmLDMnGPgqsUg7m8pe215qMLrDXw4= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 
h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU= -github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0/go.mod h1:kgDmCTgBzIEPFElEF+FK0SdjAor06dRq2Go927dnQ6o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI= -github.com/pkg/browser 
v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw= -golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/sdk/security/keyvault/azsecrets/version.go b/sdk/security/keyvault/azsecrets/version.go index 1a195cdc8507..936e7274d409 100644 --- a/sdk/security/keyvault/azsecrets/version.go +++ b/sdk/security/keyvault/azsecrets/version.go @@ -8,5 +8,5 @@ package azsecrets const ( moduleName = "azsecrets" - version = "v0.13.1" + version = "v1.0.0" ) From 450a48e54f21771effd65a0a90ac1847eff0ad3f Mon Sep 17 00:00:00 2001 From: Tamer Sherif Date: Mon, 31 Jul 2023 14:06:30 -0700 Subject: [PATCH 47/50] lease implementation and tests --- sdk/storage/azdatalake/assets.json | 2 +- sdk/storage/azdatalake/directory/client.go | 4 +- sdk/storage/azdatalake/file/client.go | 4 +- sdk/storage/azdatalake/filesystem/client.go | 4 +- .../internal/testcommon/clients_auth.go | 7 + .../lease/{filesystem_client.go => client.go} | 29 +- sdk/storage/azdatalake/lease/client_test.go | 390 ++++++++++++++++++ sdk/storage/azdatalake/lease/models.go | 40 ++ sdk/storage/azdatalake/lease/path_client.go | 36 +- 9 files changed, 494 insertions(+), 22 deletions(-) rename sdk/storage/azdatalake/lease/{filesystem_client.go => client.go} (75%) create mode 100644 sdk/storage/azdatalake/lease/client_test.go diff --git a/sdk/storage/azdatalake/assets.json b/sdk/storage/azdatalake/assets.json index 67a2df2961e2..f1deb459450c 100644 --- a/sdk/storage/azdatalake/assets.json +++ b/sdk/storage/azdatalake/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/storage/azdatalake", - "Tag": "go/storage/azdatalake_9dd1cc3e0e" + "Tag": "go/storage/azdatalake_0fd3c536e7" } \ No newline at end of file diff --git 
a/sdk/storage/azdatalake/directory/client.go b/sdk/storage/azdatalake/directory/client.go index 8c998a0f812b..da7982ab0b62 100644 --- a/sdk/storage/azdatalake/directory/client.go +++ b/sdk/storage/azdatalake/directory/client.go @@ -219,14 +219,14 @@ func (d *Client) renamePathInURL(newName string) (string, string, string) { lastIndex := strings.LastIndex(endpoint, separator) // Split the string based on the last occurrence of the separator firstPart := endpoint[:lastIndex] // From the beginning of the string to the last occurrence of the separator - newPathURL, newBlobURL := shared.GetURLs(runtime.JoinPaths(firstPart, newName)) + newBlobURL, newPathURL := shared.GetURLs(runtime.JoinPaths(firstPart, newName)) parsedNewURL, _ := url.Parse(d.DFSURL()) return parsedNewURL.Path, newPathURL, newBlobURL } // Rename renames a directory (dfs1) func (d *Client) Rename(ctx context.Context, newName string, options *RenameOptions) (RenameResponse, error) { - newPathWithoutURL, newBlobURL, newPathURL := d.renamePathInURL(newName) + newPathWithoutURL, newPathURL, newBlobURL := d.renamePathInURL(newName) lac, mac, smac, createOpts := path.FormatRenameOptions(options, newPathWithoutURL) var newBlobClient *blockblob.Client var err error diff --git a/sdk/storage/azdatalake/file/client.go b/sdk/storage/azdatalake/file/client.go index 7400838cf7fb..ca83eeecf698 100644 --- a/sdk/storage/azdatalake/file/client.go +++ b/sdk/storage/azdatalake/file/client.go @@ -224,14 +224,14 @@ func (f *Client) renamePathInURL(newName string) (string, string, string) { lastIndex := strings.LastIndex(endpoint, separator) // Split the string based on the last occurrence of the separator firstPart := endpoint[:lastIndex] // From the beginning of the string to the last occurrence of the separator - newPathURL, newBlobURL := shared.GetURLs(runtime.JoinPaths(firstPart, newName)) + newBlobURL, newPathURL := shared.GetURLs(runtime.JoinPaths(firstPart, newName)) parsedNewURL, _ := url.Parse(f.DFSURL()) return 
parsedNewURL.Path, newPathURL, newBlobURL } // Rename renames a file (dfs1) func (f *Client) Rename(ctx context.Context, newName string, options *RenameOptions) (RenameResponse, error) { - newPathWithoutURL, newBlobURL, newPathURL := f.renamePathInURL(newName) + newPathWithoutURL, newPathURL, newBlobURL := f.renamePathInURL(newName) lac, mac, smac, createOpts := path.FormatRenameOptions(options, newPathWithoutURL) var newBlobClient *blockblob.Client var err error diff --git a/sdk/storage/azdatalake/filesystem/client.go b/sdk/storage/azdatalake/filesystem/client.go index 6c21c0ed364e..46f6c22f06bb 100644 --- a/sdk/storage/azdatalake/filesystem/client.go +++ b/sdk/storage/azdatalake/filesystem/client.go @@ -184,7 +184,7 @@ func (fs *Client) BlobURL() string { // The new directory.Client uses the same request policy pipeline as the Client. func (fs *Client) NewDirectoryClient(directoryPath string) *directory.Client { dirURL := runtime.JoinPaths(fs.generatedFSClientWithDFS().Endpoint(), directoryPath) - dirURL, blobURL := shared.GetURLs(dirURL) + blobURL, dirURL := shared.GetURLs(dirURL) return (*directory.Client)(base.NewPathClient(dirURL, blobURL, fs.containerClient().NewBlockBlobClient(directoryPath), fs.generatedFSClientWithDFS().InternalClient().WithClientName(shared.DirectoryClient), fs.sharedKey(), fs.identityCredential(), fs.getClientOptions())) } @@ -192,7 +192,7 @@ func (fs *Client) NewDirectoryClient(directoryPath string) *directory.Client { // The new file.Client uses the same request policy pipeline as the Client. 
func (fs *Client) NewFileClient(filePath string) *file.Client { fileURL := runtime.JoinPaths(fs.generatedFSClientWithDFS().Endpoint(), filePath) - fileURL, blobURL := shared.GetURLs(filePath) + blobURL, fileURL := shared.GetURLs(fileURL) return (*file.Client)(base.NewPathClient(fileURL, blobURL, fs.containerClient().NewBlockBlobClient(filePath), fs.generatedFSClientWithDFS().InternalClient().WithClientName(shared.FileClient), fs.sharedKey(), fs.identityCredential(), fs.getClientOptions())) } diff --git a/sdk/storage/azdatalake/internal/testcommon/clients_auth.go b/sdk/storage/azdatalake/internal/testcommon/clients_auth.go index 20c2d683daa5..f7a455da259a 100644 --- a/sdk/storage/azdatalake/internal/testcommon/clients_auth.go +++ b/sdk/storage/azdatalake/internal/testcommon/clients_auth.go @@ -141,6 +141,13 @@ func GetFileClient(fsName, fName string, t *testing.T, accountType TestAccountTy return fileClient, err } +func CreateNewFile(ctx context.Context, _require *require.Assertions, fileName string, filesystemClient *filesystem.Client) *file.Client { + fileClient := filesystemClient.NewFileClient(fileName) + _, err := fileClient.Create(ctx, nil) + _require.Nil(err) + return fileClient +} + func GetDirClient(fsName, dirName string, t *testing.T, accountType TestAccountType, options *directory.ClientOptions) (*directory.Client, error) { if options == nil { options = &directory.ClientOptions{} diff --git a/sdk/storage/azdatalake/lease/filesystem_client.go b/sdk/storage/azdatalake/lease/client.go similarity index 75% rename from sdk/storage/azdatalake/lease/filesystem_client.go rename to sdk/storage/azdatalake/lease/client.go index 110171d15990..8844084043c3 100644 --- a/sdk/storage/azdatalake/lease/filesystem_client.go +++ b/sdk/storage/azdatalake/lease/client.go @@ -8,28 +8,36 @@ package lease import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" 
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" ) // FilesystemClient provides lease functionality for the underlying filesystem client. type FilesystemClient struct { - leaseID *string containerClient *lease.ContainerClient + leaseID *string } // FilesystemClientOptions contains the optional values when creating a FilesystemClient. -type FilesystemClientOptions struct { - // LeaseID contains a caller-provided lease ID. - LeaseID *string -} +type FilesystemClientOptions = lease.ContainerClientOptions // NewFilesystemClient creates a filesystem lease client for the provided filesystem client. // - client - an instance of a filesystem client // - options - client options; pass nil to accept the default values func NewFilesystemClient(client *filesystem.Client, options *FilesystemClientOptions) (*FilesystemClient, error) { - // TODO: set up container lease client - return nil, nil + _, _, containerClient := base.InnerClients((*base.CompositeClient[generated.FileSystemClient, generated.FileSystemClient, container.Client])(client)) + containerLeaseClient, err := lease.NewContainerClient(containerClient, options) + if err != nil { + return nil, exported.ConvertToDFSError(err) + } + return &FilesystemClient{ + containerClient: containerLeaseClient, + leaseID: containerLeaseClient.LeaseID(), + }, nil } // LeaseID returns leaseID of the client. @@ -57,7 +65,12 @@ func (c *FilesystemClient) BreakLease(ctx context.Context, o *FilesystemBreakOpt // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
func (c *FilesystemClient) ChangeLease(ctx context.Context, proposedLeaseID string, o *FilesystemChangeOptions) (FilesystemChangeResponse, error) { opts := o.format() - return c.containerClient.ChangeLease(ctx, proposedLeaseID, opts) + resp, err := c.containerClient.ChangeLease(ctx, proposedLeaseID, opts) + if err != nil { + return FilesystemChangeResponse{}, err + } + c.leaseID = &proposedLeaseID + return resp, nil } // RenewLease renews the filesystem's previously-acquired lease. diff --git a/sdk/storage/azdatalake/lease/client_test.go b/sdk/storage/azdatalake/lease/client_test.go new file mode 100644 index 000000000000..cb37374e5683 --- /dev/null +++ b/sdk/storage/azdatalake/lease/client_test.go @@ -0,0 +1,390 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package lease_test + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem" + "testing" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/recording" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/testcommon" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/lease" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +func Test(t *testing.T) { + recordMode := recording.GetRecordMode() + t.Logf("Running lease Tests in %s mode\n", recordMode) + if recordMode == recording.LiveMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + suite.Run(t, &LeaseUnrecordedTestsSuite{}) + } else if recordMode == recording.PlaybackMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + } else if recordMode == recording.RecordingMode { + suite.Run(t, &LeaseRecordedTestsSuite{}) + } +} + +func (s 
*LeaseRecordedTestsSuite) BeforeTest(suite string, test string) { + testcommon.BeforeTest(s.T(), suite, test) +} + +func (s *LeaseRecordedTestsSuite) AfterTest(suite string, test string) { + testcommon.AfterTest(s.T(), suite, test) +} + +func (s *LeaseUnrecordedTestsSuite) BeforeTest(suite string, test string) { + +} + +func (s *LeaseUnrecordedTestsSuite) AfterTest(suite string, test string) { + +} + +type LeaseRecordedTestsSuite struct { + suite.Suite +} + +type LeaseUnrecordedTestsSuite struct { + suite.Suite +} + +// var headersToIgnoreForLease = []string {"X-Ms-Proposed-Lease-Id", "X-Ms-Lease-Id"} +var proposedLeaseIDs = []*string{to.Ptr("c820a799-76d7-4ee2-6e15-546f19325c2c"), to.Ptr("326cc5e1-746e-4af8-4811-a50e6629a8ca")} + +func (s *LeaseRecordedTestsSuite) TestFilesystemAcquireLease() { + _require := require.New(s.T()) + testName := s.T().Name() + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + filesystemLeaseClient, _ := lease.NewFilesystemClient(filesystemClient, &lease.FilesystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := filesystemLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *filesystemLeaseClient.LeaseID()) + + _, err = filesystemLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFilesystemDeleteFilesystemWithoutLeaseId() { + _require := require.New(s.T()) + testName := s.T().Name() + + svcClient, err := 
testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + filesystemLeaseClient, _ := lease.NewFilesystemClient(filesystemClient, &lease.FilesystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := filesystemLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *filesystemLeaseClient.LeaseID()) + + _, err = filesystemClient.Delete(ctx, nil) + _require.NotNil(err) + + leaseID := filesystemLeaseClient.LeaseID() + _, err = filesystemClient.Delete(ctx, &filesystem.DeleteOptions{ + AccessConditions: &filesystem.AccessConditions{ + LeaseAccessConditions: &filesystem.LeaseAccessConditions{ + LeaseID: leaseID, + }, + }, + }) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFilesystemReleaseLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + filesystemLeaseClient, _ := lease.NewFilesystemClient(filesystemClient, &lease.FilesystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := filesystemLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + 
_require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *filesystemLeaseClient.LeaseID()) + + _, err = filesystemClient.Delete(ctx, nil) + _require.NotNil(err) + + _, err = filesystemLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) + + _, err = filesystemClient.Delete(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFilesystemRenewLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + filesystemLeaseClient, _ := lease.NewFilesystemClient(filesystemClient, &lease.FilesystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := filesystemLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *filesystemLeaseClient.LeaseID()) + + _, err = filesystemLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = filesystemLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFilesystemChangeLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + fsName := testcommon.GenerateFilesystemName(testName) + fsClient := testcommon.CreateNewFilesystem(context.Background(), _require, fsName, svcClient) + defer 
testcommon.DeleteFilesystem(context.Background(), _require, fsClient) + + fsLeaseClient, _ := lease.NewFilesystemClient(fsClient, &lease.FilesystemClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := fsLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(*acquireLeaseResponse.LeaseID, *fsLeaseClient.LeaseID()) + + changeLeaseResp, err := fsLeaseClient.ChangeLease(ctx, *proposedLeaseIDs[1], nil) + _require.Nil(err) + _require.EqualValues(changeLeaseResp.LeaseID, proposedLeaseIDs[1]) + _require.EqualValues(fsLeaseClient.LeaseID(), proposedLeaseIDs[1]) + + _, err = fsLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = fsLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFileAcquireLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, err := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + ctx := context.Background() + acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = 
fileLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDeleteFileWithoutLeaseId() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, err := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + ctx := context.Background() + acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.NotNil(err) + + leaseID := fileLeaseClient.LeaseID() + _, err = fileClient.Delete(ctx, &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: leaseID, + }, + }, + }) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFileReleaseLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) 
+ defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, _ := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileClient.Delete(ctx, nil) + _require.NotNil(err) + + _, err = fileLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) + + _, err = fileClient.Delete(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestFileRenewLease() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, _ := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, fileLeaseClient.LeaseID()) + + _, err = fileLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = fileLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) 
+} + +func (s *LeaseRecordedTestsSuite) TestFileChangeLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + fileName := testcommon.GenerateFileName(testName) + fileClient := testcommon.CreateNewFile(context.Background(), _require, fileName, filesystemClient) + fileLeaseClient, _ := lease.NewPathClient(fileClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := fileLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.Equal(*acquireLeaseResponse.LeaseID, *proposedLeaseIDs[0]) + + changeLeaseResp, err := fileLeaseClient.ChangeLease(ctx, *proposedLeaseIDs[1], nil) + _require.Nil(err) + _require.Equal(*changeLeaseResp.LeaseID, *proposedLeaseIDs[1]) + + _, err = fileLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = fileLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} diff --git a/sdk/storage/azdatalake/lease/models.go b/sdk/storage/azdatalake/lease/models.go index c09d30a21b40..8148120b49b3 100644 --- a/sdk/storage/azdatalake/lease/models.go +++ b/sdk/storage/azdatalake/lease/models.go @@ -18,6 +18,9 @@ type FilesystemAcquireOptions struct { } func (o *FilesystemAcquireOptions) format() *lease.ContainerAcquireOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } return &lease.ContainerAcquireOptions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ IfModifiedSince: 
o.ModifiedAccessConditions.IfModifiedSince, @@ -35,6 +38,14 @@ type FilesystemBreakOptions struct { } func (o *FilesystemBreakOptions) format() *lease.ContainerBreakOptions { + if o == nil || o.ModifiedAccessConditions == nil { + if o != nil { + return &lease.ContainerBreakOptions{ + BreakPeriod: o.BreakPeriod, + } + } + return nil + } return &lease.ContainerBreakOptions{ BreakPeriod: o.BreakPeriod, ModifiedAccessConditions: &blob.ModifiedAccessConditions{ @@ -52,6 +63,9 @@ type FilesystemChangeOptions struct { } func (o *FilesystemChangeOptions) format() *lease.ContainerChangeOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } return &lease.ContainerChangeOptions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, @@ -67,6 +81,9 @@ type FilesystemReleaseOptions struct { } func (o *FilesystemReleaseOptions) format() *lease.ContainerReleaseOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } return &lease.ContainerReleaseOptions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, @@ -82,6 +99,9 @@ type FilesystemRenewOptions struct { } func (o *FilesystemRenewOptions) format() *lease.ContainerRenewOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } return &lease.ContainerRenewOptions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, @@ -98,6 +118,9 @@ type PathAcquireOptions struct { } func (o *PathAcquireOptions) format() *lease.BlobAcquireOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } return &lease.BlobAcquireOptions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, @@ -115,6 +138,14 @@ type PathBreakOptions struct { } func (o *PathBreakOptions) format() *lease.BlobBreakOptions { 
+ if o == nil || o.ModifiedAccessConditions == nil { + if o != nil { + return &lease.BlobBreakOptions{ + BreakPeriod: o.BreakPeriod, + } + } + return nil + } return &lease.BlobBreakOptions{ BreakPeriod: o.BreakPeriod, ModifiedAccessConditions: &blob.ModifiedAccessConditions{ @@ -132,6 +163,9 @@ type PathChangeOptions struct { } func (o *PathChangeOptions) format() *lease.BlobChangeOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } return &lease.BlobChangeOptions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, @@ -147,6 +181,9 @@ type PathReleaseOptions struct { } func (o *PathReleaseOptions) format() *lease.BlobReleaseOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } return &lease.BlobReleaseOptions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, @@ -162,6 +199,9 @@ type PathRenewOptions struct { } func (o *PathRenewOptions) format() *lease.BlobRenewOptions { + if o == nil || o.ModifiedAccessConditions == nil { + return nil + } return &lease.BlobRenewOptions{ ModifiedAccessConditions: &blob.ModifiedAccessConditions{ IfModifiedSince: o.ModifiedAccessConditions.IfModifiedSince, diff --git a/sdk/storage/azdatalake/lease/path_client.go b/sdk/storage/azdatalake/lease/path_client.go index ec7aa846c47e..4e35b6d71251 100644 --- a/sdk/storage/azdatalake/lease/path_client.go +++ b/sdk/storage/azdatalake/lease/path_client.go @@ -8,9 +8,14 @@ package lease import ( "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/lease" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory" "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/base" + 
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/internal/generated" ) // PathClient provides lease functionality for the underlying path client. @@ -20,17 +25,29 @@ type PathClient struct { } // PathClientOptions contains the optional values when creating a PathClient. -type PathClientOptions struct { - // LeaseID contains a caller-provided lease ID. - LeaseID *string -} +type PathClientOptions = lease.BlobClientOptions // NewPathClient creates a path lease client for the provided path client. // - client - an instance of a path client // - options - client options; pass nil to accept the default values func NewPathClient[T directory.Client | file.Client](client *T, options *PathClientOptions) (*PathClient, error) { - // TODO: set up blob lease client - return nil, nil + var blobClient *blockblob.Client + switch t := any(client).(type) { + case *directory.Client: + _, _, blobClient = base.InnerClients((*base.CompositeClient[generated.PathClient, generated.PathClient, blockblob.Client])(t)) + case *file.Client: + _, _, blobClient = base.InnerClients((*base.CompositeClient[generated.PathClient, generated.PathClient, blockblob.Client])(t)) + default: + return nil, fmt.Errorf("unhandled client type %T", client) + } + blobLeaseClient, err := lease.NewBlobClient(blobClient, options) + if err != nil { + return nil, exported.ConvertToDFSError(err) + } + return &PathClient{ + blobClient: blobLeaseClient, + leaseID: blobLeaseClient.LeaseID(), + }, nil } // LeaseID returns leaseID of the client. @@ -56,7 +73,12 @@ func (c *PathClient) BreakLease(ctx context.Context, o *PathBreakOptions) (PathB // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
func (c *PathClient) ChangeLease(ctx context.Context, proposedID string, o *PathChangeOptions) (PathChangeResponse, error) { opts := o.format() - return c.blobClient.ChangeLease(ctx, proposedID, opts) + resp, err := c.blobClient.ChangeLease(ctx, proposedID, opts) + if err != nil { + return PathChangeResponse{}, err + } + c.leaseID = &proposedID + return resp, nil } // RenewLease renews the path's previously-acquired lease. From e8a41ef4d21505bb8ea058032031db70e611d850 Mon Sep 17 00:00:00 2001 From: Tamer Sherif Date: Mon, 31 Jul 2023 14:18:29 -0700 Subject: [PATCH 48/50] tests --- sdk/storage/azdatalake/assets.json | 2 +- .../internal/testcommon/clients_auth.go | 7 + sdk/storage/azdatalake/lease/client_test.go | 170 ++++++++++++++++++ 3 files changed, 178 insertions(+), 1 deletion(-) diff --git a/sdk/storage/azdatalake/assets.json b/sdk/storage/azdatalake/assets.json index f1deb459450c..89fb3cdc8b61 100644 --- a/sdk/storage/azdatalake/assets.json +++ b/sdk/storage/azdatalake/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/storage/azdatalake", - "Tag": "go/storage/azdatalake_0fd3c536e7" + "Tag": "go/storage/azdatalake_78f150eb1d" } \ No newline at end of file diff --git a/sdk/storage/azdatalake/internal/testcommon/clients_auth.go b/sdk/storage/azdatalake/internal/testcommon/clients_auth.go index f7a455da259a..93a91257b1b8 100644 --- a/sdk/storage/azdatalake/internal/testcommon/clients_auth.go +++ b/sdk/storage/azdatalake/internal/testcommon/clients_auth.go @@ -148,6 +148,13 @@ func CreateNewFile(ctx context.Context, _require *require.Assertions, fileName s return fileClient } +func CreateNewDir(ctx context.Context, _require *require.Assertions, dirName string, filesystemClient *filesystem.Client) *directory.Client { + dirClient := filesystemClient.NewDirectoryClient(dirName) + _, err := dirClient.Create(ctx, nil) + _require.Nil(err) + return dirClient +} + func GetDirClient(fsName, dirName string, t 
*testing.T, accountType TestAccountType, options *directory.ClientOptions) (*directory.Client, error) { if options == nil { options = &directory.ClientOptions{} diff --git a/sdk/storage/azdatalake/lease/client_test.go b/sdk/storage/azdatalake/lease/client_test.go index cb37374e5683..51f2183ea718 100644 --- a/sdk/storage/azdatalake/lease/client_test.go +++ b/sdk/storage/azdatalake/lease/client_test.go @@ -388,3 +388,173 @@ func (s *LeaseRecordedTestsSuite) TestFileChangeLease() { _, err = fileLeaseClient.ReleaseLease(ctx, nil) _require.Nil(err) } + +func (s *LeaseRecordedTestsSuite) TestDirAcquireLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + dirName := testcommon.GenerateDirName(testName) + dirClient := testcommon.CreateNewDir(context.Background(), _require, dirName, filesystemClient) + dirLeaseClient, err := lease.NewPathClient(dirClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + ctx := context.Background() + acquireLeaseResponse, err := dirLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, dirLeaseClient.LeaseID()) + + _, err = dirLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDeleteDirWithoutLeaseId() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), 
testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + dirName := testcommon.GenerateDirName(testName) + dirClient := testcommon.CreateNewDir(context.Background(), _require, dirName, filesystemClient) + dirLeaseClient, err := lease.NewPathClient(dirClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + _require.NoError(err) + + ctx := context.Background() + acquireLeaseResponse, err := dirLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, dirLeaseClient.LeaseID()) + + _, err = dirClient.Delete(ctx, nil) + _require.NotNil(err) + + leaseID := dirLeaseClient.LeaseID() + _, err = dirClient.Delete(ctx, &file.DeleteOptions{ + AccessConditions: &file.AccessConditions{ + LeaseAccessConditions: &file.LeaseAccessConditions{ + LeaseID: leaseID, + }, + }, + }) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDirReleaseLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + DirName := testcommon.GenerateDirName(testName) + DirClient := testcommon.CreateNewDir(context.Background(), _require, DirName, filesystemClient) + DirLeaseClient, _ := lease.NewPathClient(DirClient, &lease.PathClientOptions{ + LeaseID: 
proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := DirLeaseClient.AcquireLease(ctx, int32(60), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, DirLeaseClient.LeaseID()) + + _, err = DirClient.Delete(ctx, nil) + _require.NotNil(err) + + _, err = DirLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) + + _, err = DirClient.Delete(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDirRenewLease() { + _require := require.New(s.T()) + testName := s.T().Name() + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + dirName := testcommon.GenerateDirName(testName) + dirClient := testcommon.CreateNewDir(context.Background(), _require, dirName, filesystemClient) + dirLeaseClient, _ := lease.NewPathClient(dirClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := dirLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.EqualValues(acquireLeaseResponse.LeaseID, dirLeaseClient.LeaseID()) + + _, err = dirLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = dirLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} + +func (s *LeaseRecordedTestsSuite) TestDirChangeLease() { + _require := require.New(s.T()) + testName := s.T().Name() + + //ignoreHeaders(_context.recording, headersToIgnoreForLease) + + svcClient, err := testcommon.GetServiceClient(s.T(), testcommon.TestAccountDatalake, nil) + _require.NoError(err) + + filesystemName := 
testcommon.GenerateFilesystemName(testName) + filesystemClient := testcommon.CreateNewFilesystem(context.Background(), _require, filesystemName, svcClient) + defer testcommon.DeleteFilesystem(context.Background(), _require, filesystemClient) + + dirName := testcommon.GenerateDirName(testName) + dirClient := testcommon.CreateNewDir(context.Background(), _require, dirName, filesystemClient) + dirLeaseClient, _ := lease.NewPathClient(dirClient, &lease.PathClientOptions{ + LeaseID: proposedLeaseIDs[0], + }) + + ctx := context.Background() + acquireLeaseResponse, err := dirLeaseClient.AcquireLease(ctx, int32(15), nil) + _require.Nil(err) + _require.NotNil(acquireLeaseResponse.LeaseID) + _require.Equal(*acquireLeaseResponse.LeaseID, *proposedLeaseIDs[0]) + + changeLeaseResp, err := dirLeaseClient.ChangeLease(ctx, *proposedLeaseIDs[1], nil) + _require.Nil(err) + _require.Equal(*changeLeaseResp.LeaseID, *proposedLeaseIDs[1]) + + _, err = dirLeaseClient.RenewLease(ctx, nil) + _require.Nil(err) + + _, err = dirLeaseClient.ReleaseLease(ctx, nil) + _require.Nil(err) +} From c2891f54ee572015450e8c606b8d6fb0cc43d1e7 Mon Sep 17 00:00:00 2001 From: Tamer Sherif Date: Mon, 31 Jul 2023 14:35:10 -0700 Subject: [PATCH 49/50] handled errors --- sdk/storage/azdatalake/lease/client.go | 14 +++++++++----- sdk/storage/azdatalake/lease/path_client.go | 14 +++++++++----- sdk/storage/azdatalake/service/client.go | 4 ++-- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/sdk/storage/azdatalake/lease/client.go b/sdk/storage/azdatalake/lease/client.go index 8844084043c3..d59dfcf1a7da 100644 --- a/sdk/storage/azdatalake/lease/client.go +++ b/sdk/storage/azdatalake/lease/client.go @@ -50,7 +50,8 @@ func (c *FilesystemClient) LeaseID() *string { // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
func (c *FilesystemClient) AcquireLease(ctx context.Context, duration int32, o *FilesystemAcquireOptions) (FilesystemAcquireResponse, error) { opts := o.format() - return c.containerClient.AcquireLease(ctx, duration, opts) + resp, err := c.containerClient.AcquireLease(ctx, duration, opts) + return resp, exported.ConvertToDFSError(err) } // BreakLease breaks the filesystem's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) @@ -58,7 +59,8 @@ func (c *FilesystemClient) AcquireLease(ctx context.Context, duration int32, o * // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. func (c *FilesystemClient) BreakLease(ctx context.Context, o *FilesystemBreakOptions) (FilesystemBreakResponse, error) { opts := o.format() - return c.containerClient.BreakLease(ctx, opts) + resp, err := c.containerClient.BreakLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) } // ChangeLease changes the filesystem's lease ID. @@ -67,7 +69,7 @@ func (c *FilesystemClient) ChangeLease(ctx context.Context, proposedLeaseID stri opts := o.format() resp, err := c.containerClient.ChangeLease(ctx, proposedLeaseID, opts) if err != nil { - return FilesystemChangeResponse{}, err + return resp, exported.ConvertToDFSError(err) } c.leaseID = &proposedLeaseID return resp, nil @@ -77,11 +79,13 @@ func (c *FilesystemClient) ChangeLease(ctx context.Context, proposedLeaseID stri // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. func (c *FilesystemClient) RenewLease(ctx context.Context, o *FilesystemRenewOptions) (FilesystemRenewResponse, error) { opts := o.format() - return c.containerClient.RenewLease(ctx, opts) + resp, err := c.containerClient.RenewLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) } // ReleaseLease releases the filesystem's previously-acquired lease. 
func (c *FilesystemClient) ReleaseLease(ctx context.Context, o *FilesystemReleaseOptions) (FilesystemReleaseResponse, error) { opts := o.format() - return c.containerClient.ReleaseLease(ctx, opts) + resp, err := c.containerClient.ReleaseLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) } diff --git a/sdk/storage/azdatalake/lease/path_client.go b/sdk/storage/azdatalake/lease/path_client.go index 4e35b6d71251..97e785571c05 100644 --- a/sdk/storage/azdatalake/lease/path_client.go +++ b/sdk/storage/azdatalake/lease/path_client.go @@ -60,13 +60,15 @@ func (c *PathClient) LeaseID() *string { // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. func (c *PathClient) AcquireLease(ctx context.Context, duration int32, o *PathAcquireOptions) (PathAcquireResponse, error) { opts := o.format() - return c.blobClient.AcquireLease(ctx, duration, opts) + resp, err := c.blobClient.AcquireLease(ctx, duration, opts) + return resp, exported.ConvertToDFSError(err) } // BreakLease breaks the path's previously-acquired lease. func (c *PathClient) BreakLease(ctx context.Context, o *PathBreakOptions) (PathBreakResponse, error) { opts := o.format() - return c.blobClient.BreakLease(ctx, opts) + resp, err := c.blobClient.BreakLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) } // ChangeLease changes the path's lease ID. @@ -75,7 +77,7 @@ func (c *PathClient) ChangeLease(ctx context.Context, proposedID string, o *Path opts := o.format() resp, err := c.blobClient.ChangeLease(ctx, proposedID, opts) if err != nil { - return PathChangeResponse{}, err + return resp, exported.ConvertToDFSError(err) } c.leaseID = &proposedID return resp, nil @@ -85,12 +87,14 @@ func (c *PathClient) ChangeLease(ctx context.Context, proposedID string, o *Path // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
func (c *PathClient) RenewLease(ctx context.Context, o *PathRenewOptions) (PathRenewResponse, error) { opts := o.format() - return c.blobClient.RenewLease(ctx, opts) + resp, err := c.blobClient.RenewLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) } // ReleaseLease releases the path's previously-acquired lease. // For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. func (c *PathClient) ReleaseLease(ctx context.Context, o *PathReleaseOptions) (PathReleaseResponse, error) { opts := o.format() - return c.blobClient.ReleaseLease(ctx, opts) + resp, err := c.blobClient.ReleaseLease(ctx, opts) + return resp, exported.ConvertToDFSError(err) } diff --git a/sdk/storage/azdatalake/service/client.go b/sdk/storage/azdatalake/service/client.go index 33e96821f3bd..9a31c3f6a8ab 100644 --- a/sdk/storage/azdatalake/service/client.go +++ b/sdk/storage/azdatalake/service/client.go @@ -147,7 +147,7 @@ func (s *Client) getClientOptions() *base.ClientOptions { // The new filesystem.Client uses the same request policy pipeline as the Client. 
func (s *Client) NewFilesystemClient(filesystemName string) *filesystem.Client { filesystemURL := runtime.JoinPaths(s.generatedServiceClientWithDFS().Endpoint(), filesystemName) - filesystemURL, containerURL := shared.GetURLs(filesystemURL) + containerURL, filesystemURL := shared.GetURLs(filesystemURL) return (*filesystem.Client)(base.NewFilesystemClient(filesystemURL, containerURL, s.serviceClient().NewContainerClient(filesystemName), s.generatedServiceClientWithDFS().InternalClient().WithClientName(shared.FilesystemClient), s.sharedKey(), s.identityCredential(), s.getClientOptions())) } @@ -162,7 +162,7 @@ func (s *Client) GetUserDelegationCredential(ctx context.Context, info KeyInfo, getUserDelegationKeyOptions := o.format() udk, err := s.generatedServiceClientWithBlob().GetUserDelegationKey(ctx, info, getUserDelegationKeyOptions) if err != nil { - return nil, err + return nil, exported.ConvertToDFSError(err) } return exported.NewUserDelegationCredential(strings.Split(url.Host, ".")[0], udk.UserDelegationKey), nil From 767cbd9a46155e043328e159fac38d601329cadf Mon Sep 17 00:00:00 2001 From: Tamer Sherif Date: Tue, 1 Aug 2023 11:21:29 -0700 Subject: [PATCH 50/50] nit --- sdk/storage/azdatalake/lease/models.go | 28 +++++++++++++------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/sdk/storage/azdatalake/lease/models.go b/sdk/storage/azdatalake/lease/models.go index 8148120b49b3..ce552403c698 100644 --- a/sdk/storage/azdatalake/lease/models.go +++ b/sdk/storage/azdatalake/lease/models.go @@ -38,13 +38,13 @@ type FilesystemBreakOptions struct { } func (o *FilesystemBreakOptions) format() *lease.ContainerBreakOptions { - if o == nil || o.ModifiedAccessConditions == nil { - if o != nil { - return &lease.ContainerBreakOptions{ - BreakPeriod: o.BreakPeriod, - } - } - return nil + opts := &lease.ContainerBreakOptions{} + if o == nil { + return opts + } + if o.ModifiedAccessConditions == nil { + opts.BreakPeriod = o.BreakPeriod + return opts } return 
&lease.ContainerBreakOptions{ BreakPeriod: o.BreakPeriod, @@ -138,13 +138,13 @@ type PathBreakOptions struct { } func (o *PathBreakOptions) format() *lease.BlobBreakOptions { - if o == nil || o.ModifiedAccessConditions == nil { - if o != nil { - return &lease.BlobBreakOptions{ - BreakPeriod: o.BreakPeriod, - } - } - return nil + opts := &lease.BlobBreakOptions{} + if o == nil { + return opts + } + if o.ModifiedAccessConditions == nil { + opts.BreakPeriod = o.BreakPeriod + return opts } return &lease.BlobBreakOptions{ BreakPeriod: o.BreakPeriod,