diff --git a/cdc/model/sink.go b/cdc/model/sink.go
index e6f731ec7d9..46345ee1f40 100644
--- a/cdc/model/sink.go
+++ b/cdc/model/sink.go
@@ -272,6 +272,16 @@ func (r *RowChangedEvent) IsDelete() bool {
 	return len(r.PreColumns) != 0 && len(r.Columns) == 0
 }
 
+// IsInsert returns true if the row is an insert event
+func (r *RowChangedEvent) IsInsert() bool {
+	return len(r.PreColumns) == 0 && len(r.Columns) != 0
+}
+
+// IsUpdate returns true if the row is an update event
+func (r *RowChangedEvent) IsUpdate() bool {
+	return len(r.PreColumns) != 0 && len(r.Columns) != 0
+}
+
 // PrimaryKeyColumns returns the column(s) corresponding to the handle key(s)
 func (r *RowChangedEvent) PrimaryKeyColumns() []*Column {
 	pkeyCols := make([]*Column, 0)
diff --git a/cdc/owner/feed_state_manager_test.go b/cdc/owner/feed_state_manager_test.go
index 6bb962938e7..3b6ea94a83b 100644
--- a/cdc/owner/feed_state_manager_test.go
+++ b/cdc/owner/feed_state_manager_test.go
@@ -233,7 +233,7 @@ func (s *feedStateManagerSuite) TestChangefeedStatusNotExist(c *check.C) {
 	state := orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID)
 	tester := orchestrator.NewReactorStateTester(c, state, map[string]string{
 		"/tidb/cdc/capture/d563bfc0-f406-4f34-bc7d-6dc2e35a44e5": `{"id":"d563bfc0-f406-4f34-bc7d-6dc2e35a44e5","address":"172.16.6.147:8300","version":"v5.0.0-master-dirty"}`,
-		"/tidb/cdc/changefeed/info/" + ctx.ChangefeedVars().ID: `{"sink-uri":"blackhole:///","opts":{},"create-time":"2021-06-05T00:44:15.065939487+08:00","start-ts":425381670108266496,"target-ts":0,"admin-job-type":1,"sort-engine":"unified","config":{"case-sensitive":true,"enable-old-value":true,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"default"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1}},"state":"failed","history":[],"error":{"addr":"172.16.6.147:8300","code":"CDC:ErrSnapshotLostByGC","message":"[CDC:ErrSnapshotLostByGC]fail to create or maintain changefeed due to snapshot loss caused by GC. checkpoint-ts 425381670108266496 is earlier than GC safepoint at 0"},"sync-point-enabled":false,"sync-point-interval":600000000000,"creator-version":"v5.0.0-master-dirty"}`,
+		"/tidb/cdc/changefeed/info/" + ctx.ChangefeedVars().ID: `{"sink-uri":"blackhole:///","opts":{},"create-time":"2021-06-05T00:44:15.065939487+08:00","start-ts":425381670108266496,"target-ts":0,"admin-job-type":1,"sort-engine":"unified","config":{"case-sensitive":true,"enable-old-value":true,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"open-protocol"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1}},"state":"failed","history":[],"error":{"addr":"172.16.6.147:8300","code":"CDC:ErrSnapshotLostByGC","message":"[CDC:ErrSnapshotLostByGC]fail to create or maintain changefeed due to snapshot loss caused by GC. checkpoint-ts 425381670108266496 is earlier than GC safepoint at 0"},"sync-point-enabled":false,"sync-point-interval":600000000000,"creator-version":"v5.0.0-master-dirty"}`,
 		"/tidb/cdc/owner/156579d017f84a68": "d563bfc0-f406-4f34-bc7d-6dc2e35a44e5",
 	})
 	manager.Tick(state)
diff --git a/cdc/owner/scheduler.go b/cdc/owner/scheduler.go
index 9b3f45616c8..1fed90c0243 100644
--- a/cdc/owner/scheduler.go
+++ b/cdc/owner/scheduler.go
@@ -127,11 +127,6 @@ func (s *schedulerV2) DispatchTable(
 	captureID model.CaptureID,
 	isDelete bool,
 ) (done bool, err error) {
-	client, ok := s.GetClient(ctx, captureID)
-	if !ok {
-		return false, nil
-	}
-
 	topic := model.DispatchTableTopic(changeFeedID)
 	message := &model.DispatchTableMessage{
 		OwnerRev: ctx.GlobalVars().OwnerRevision,
@@ -139,14 +134,13 @@ func (s *schedulerV2) DispatchTable(
 		IsDelete: isDelete,
 	}
 
-	_, err = client.TrySendMessage(ctx, topic, message)
+	ok, err := s.trySendMessage(ctx, captureID, topic, message)
 	if err != nil {
-		if cerror.ErrPeerMessageSendTryAgain.Equal(err) {
-			log.Warn("scheduler: send message failed, retry later", zap.Error(err))
-			return false, nil
-		}
 		return false, errors.Trace(err)
 	}
+	if !ok {
+		return false, nil
+	}
 
 	s.stats.RecordDispatch()
 	log.Debug("send message successfully",
@@ -161,25 +155,19 @@ func (s *schedulerV2) Announce(
 	changeFeedID model.ChangeFeedID,
 	captureID model.CaptureID,
 ) (bool, error) {
-	client, ok := s.GetClient(ctx, captureID)
-	if !ok {
-		return false, nil
-	}
-
 	topic := model.AnnounceTopic(changeFeedID)
 	message := &model.AnnounceMessage{
 		OwnerRev:     ctx.GlobalVars().OwnerRevision,
 		OwnerVersion: version.ReleaseSemver(),
 	}
 
-	_, err := client.TrySendMessage(ctx, topic, message)
+	ok, err := s.trySendMessage(ctx, captureID, topic, message)
 	if err != nil {
-		if cerror.ErrPeerMessageSendTryAgain.Equal(err) {
-			log.Warn("scheduler: send message failed, retry later", zap.Error(err))
-			return false, nil
-		}
 		return false, errors.Trace(err)
 	}
+	if !ok {
+		return false, nil
+	}
 
 	s.stats.RecordAnnounce()
 	log.Debug("send message successfully",
@@ -189,7 +177,7 @@ func (s *schedulerV2) Announce(
 	return true, nil
 }
 
-func (s *schedulerV2) GetClient(ctx context.Context, target model.CaptureID) (*p2p.MessageClient, bool) {
+func (s *schedulerV2) getClient(target model.CaptureID) (*p2p.MessageClient, bool) {
 	client := s.messageRouter.GetClient(target)
 	if client == nil {
 		log.Warn("scheduler: no message client found, retry later",
@@ -199,6 +187,38 @@ func (s *schedulerV2) GetClient(ctx context.Context, target model.CaptureID) (*p2p.MessageClient, bool) {
 	return client, true
 }
 
+func (s *schedulerV2) trySendMessage(
+	ctx context.Context,
+	target model.CaptureID,
+	topic p2p.Topic,
+	value interface{},
+) (bool, error) {
+	// TODO (zixiong): abstract this function out together with the similar method in cdc/processor/agent.go
+	// We probably need more advanced logic to handle and mitigate complex failure situations.
+
+	client, ok := s.getClient(target)
+	if !ok {
+		return false, nil
+	}
+
+	_, err := client.TrySendMessage(ctx, topic, value)
+	if err != nil {
+		if cerror.ErrPeerMessageSendTryAgain.Equal(err) {
+			return false, nil
+		}
+		if cerror.ErrPeerMessageClientClosed.Equal(err) {
+			log.Warn("peer messaging client is closed while trying to send a message through it. "+
+				"Report a bug if this warning repeats",
+				zap.String("changefeed-id", s.changeFeedID),
+				zap.String("target", target))
+			return false, nil
+		}
+		return false, errors.Trace(err)
+	}
+
+	return true, nil
+}
+
 func (s *schedulerV2) Close(ctx context.Context) {
 	log.Debug("scheduler closed", zap.String("changefeed-id", s.changeFeedID))
 	s.deregisterPeerMessageHandlers(ctx)
diff --git a/cdc/owner/scheduler_test.go b/cdc/owner/scheduler_test.go
index 45ac547216c..c02d8845183 100644
--- a/cdc/owner/scheduler_test.go
+++ b/cdc/owner/scheduler_test.go
@@ -40,6 +40,11 @@ func TestSchedulerBasics(t *testing.T) {
 		_ = failpoint.Disable("github.com/pingcap/tiflow/pkg/p2p/ClientInjectSendMessageTryAgain")
 	}()
 
+	_ = failpoint.Enable("github.com/pingcap/tiflow/pkg/p2p/ClientInjectClosed", "5*return(true)")
+	defer func() {
+		_ = failpoint.Disable("github.com/pingcap/tiflow/pkg/p2p/ClientInjectClosed")
+	}()
+
 	stdCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
diff --git a/cdc/owner/scheduler_v1.go b/cdc/owner/scheduler_v1.go
index 480a170d6a9..6fb504f83b3 100644
--- a/cdc/owner/scheduler_v1.go
+++ b/cdc/owner/scheduler_v1.go
@@ -309,6 +309,11 @@ func (s *oldScheduler) handleJobs(jobs []*schedulerJob) {
 func (s *oldScheduler) cleanUpFinishedOperations() {
 	for captureID := range s.state.TaskStatuses {
 		s.state.PatchTaskStatus(captureID, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) {
+			if status == nil {
+				log.Warn("task status of the capture is not found, maybe the key in etcd was deleted", zap.String("captureID", captureID), zap.String("changeFeedID", s.state.ID))
+				return status, false, nil
+			}
+
 			changed := false
 			for tableID, operation := range status.Operation {
 				if operation.Status == model.OperFinished {
diff --git a/cdc/owner/scheduler_v1_test.go b/cdc/owner/scheduler_v1_test.go
index e732669873f..eb2950b7190 100644
--- a/cdc/owner/scheduler_v1_test.go
+++ b/cdc/owner/scheduler_v1_test.go
@@ -19,6 +19,7 @@ import (
 
 	"github.com/pingcap/check"
 	"github.com/pingcap/tiflow/cdc/model"
+	"github.com/pingcap/tiflow/pkg/etcd"
 	"github.com/pingcap/tiflow/pkg/orchestrator"
 	"github.com/pingcap/tiflow/pkg/util/testleak"
 )
@@ -83,8 +84,24 @@ func (s *schedulerSuite) finishTableOperation(captureID model.CaptureID, tableID
 
 func (s *schedulerSuite) TestScheduleOneCapture(c *check.C) {
 	defer testleak.AfterTest(c)()
+
+	s.reset(c)
+	captureID := "test-capture-0"
+	s.addCapture(captureID)
+
+	_, _ = s.scheduler.Tick(s.state, []model.TableID{}, s.captures)
+
+	// Manually simulate the scenario where the corresponding key was deleted in etcd
+	key := &etcd.CDCKey{
+		Tp:           etcd.CDCKeyTypeTaskStatus,
+		CaptureID:    captureID,
+		ChangefeedID: s.state.ID,
+	}
+	s.tester.MustUpdate(key.String(), nil)
+	s.tester.MustApplyPatches()
+
 	s.reset(c)
-	captureID := "test-capture-1"
+	captureID = "test-capture-1"
 	s.addCapture(captureID)
 
 	// add three tables
diff --git a/cdc/processor/agent.go b/cdc/processor/agent.go
index 44460b1758f..2f966f91b74 100644
--- a/cdc/processor/agent.go
+++ b/cdc/processor/agent.go
@@ -288,6 +288,9 @@ func (a *agentImpl) trySendMessage(
 	topic p2p.Topic,
 	value interface{},
 ) (bool, error) {
+	// TODO (zixiong): abstract this function out together with the similar method in cdc/owner/scheduler.go
+	// We probably need more advanced logic to handle and mitigate complex failure situations.
+
 	client := a.messageRouter.GetClient(target)
 	if client == nil {
 		a.printNoClientWarning(target)
@@ -299,6 +302,13 @@ func (a *agentImpl) trySendMessage(
 		if cerror.ErrPeerMessageSendTryAgain.Equal(err) {
 			return false, nil
 		}
+		if cerror.ErrPeerMessageClientClosed.Equal(err) {
+			log.Warn("peer messaging client is closed while trying to send a message through it. "+
+				"Report a bug if this warning repeats",
+				zap.String("changefeed-id", a.changeFeed),
+				zap.String("target", target))
+			return false, nil
+		}
 		return false, errors.Trace(err)
 	}
 
diff --git a/cdc/processor/agent_test.go b/cdc/processor/agent_test.go
index ddff02e468b..f6ed64d2c17 100644
--- a/cdc/processor/agent_test.go
+++ b/cdc/processor/agent_test.go
@@ -18,6 +18,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/pingcap/failpoint"
 	"github.com/pingcap/tiflow/cdc/model"
 	pscheduler "github.com/pingcap/tiflow/cdc/scheduler"
 	cdcContext "github.com/pingcap/tiflow/pkg/context"
@@ -334,3 +335,45 @@ func TestAgentNoOwnerAtStartUp(t *testing.T) {
 	err = agent.Close()
 	require.NoError(t, err)
 }
+
+func TestAgentTolerateClientClosed(t *testing.T) {
+	suite := newAgentTestSuite(t)
+	defer suite.Close()
+
+	suite.etcdKVClient.On("Get", mock.Anything, etcd.CaptureOwnerKey, mock.Anything).Return(&clientv3.GetResponse{
+		Kvs: []*mvccpb.KeyValue{
+			{
+				Key:         []byte(etcd.CaptureOwnerKey),
+				Value:       []byte(ownerCaptureID),
+				ModRevision: 1,
+			},
+		},
+	}, nil).Once()
+
+	// Test Point 1: Create an agent.
+	agent, err := suite.CreateAgent(t)
+	require.NoError(t, err)
+
+	_ = failpoint.Enable("github.com/pingcap/tiflow/pkg/p2p/ClientInjectClosed", "5*return(true)")
+	defer func() {
+		_ = failpoint.Disable("github.com/pingcap/tiflow/pkg/p2p/ClientInjectClosed")
+	}()
+
+	// Test Point 2: We should tolerate the error ErrPeerMessageClientClosed
+	for i := 0; i < 6; i++ {
+		err = agent.Tick(suite.cdcCtx)
+		require.NoError(t, err)
+	}
+
+	select {
+	case <-suite.ctx.Done():
+		require.Fail(t, "context should not be canceled")
+	case syncMsg := <-suite.syncCh:
+		require.Equal(t, &model.SyncMessage{
+			ProcessorVersion: version.ReleaseSemver(),
+			Running:          nil,
+			Adding:           nil,
+			Removing:         nil,
+		}, syncMsg)
+	}
+}
diff --git a/cdc/processor/pipeline/sorter.go b/cdc/processor/pipeline/sorter.go
index 20189460e07..2f7c19c963b 100644
--- a/cdc/processor/pipeline/sorter.go
+++ b/cdc/processor/pipeline/sorter.go
@@ -79,6 +79,7 @@ func newSorterNode(
 		flowController: flowController,
 		mounter:        mounter,
 		resolvedTs:     startTs,
+		barrierTs:      startTs,
 		replConfig:     replConfig,
 	}
 }
@@ -101,7 +102,7 @@ func (n *sorterNode) Init(ctx pipeline.NodeContext) error {
 		startTs := ctx.ChangefeedVars().Info.StartTs
 		actorID := ctx.GlobalVars().SorterSystem.ActorID(uint64(n.tableID))
 		router := ctx.GlobalVars().SorterSystem.Router()
-		levelSorter := leveldb.NewLevelDBSorter(ctx, n.tableID, startTs, router, actorID)
+		levelSorter := leveldb.NewSorter(ctx, n.tableID, startTs, router, actorID)
 		n.cleanID = actorID
 		n.cleanTask = levelSorter.CleanupTask()
 		n.cleanRouter = ctx.GlobalVars().SorterSystem.CleanerRouter()
diff --git a/cdc/processor/pipeline/sorter_test.go b/cdc/processor/pipeline/sorter_test.go
index 3b20ab9db96..32de1e92b96 100644
--- a/cdc/processor/pipeline/sorter_test.go
+++ b/cdc/processor/pipeline/sorter_test.go
@@ -104,7 +104,8 @@ func (c *checkSorter) Output() <-chan *model.PolymorphicEvent {
 
 func TestSorterResolvedTsLessEqualBarrierTs(t *testing.T) {
 	t.Parallel()
-	s := &checkSorter{ch: make(chan *model.PolymorphicEvent, 1)}
+	sch := make(chan *model.PolymorphicEvent, 1)
+	s := &checkSorter{ch: sch}
 	sn := newSorterNode("tableName", 1, 1, nil, nil, &config.ReplicaConfig{
 		Consistent: &config.ConsistentConfig{},
 	})
@@ -112,12 +113,22 @@ func TestSorterResolvedTsLessEqualBarrierTs(t *testing.T) {
 	ch := make(chan pipeline.Message, 1)
 	require.EqualValues(t, 1, sn.ResolvedTs())
+
+	// Resolved ts must not regress even if there is no barrier ts message.
+	resolvedTs1 := pipeline.PolymorphicEventMessage(model.NewResolvedPolymorphicEvent(0, 1))
 	nctx := pipeline.NewNodeContext(
+		cdcContext.NewContext(context.Background(), nil), resolvedTs1, ch)
+	err := sn.Receive(nctx)
+	require.Nil(t, err)
+	require.EqualValues(t, model.NewResolvedPolymorphicEvent(0, 1), <-sch)
+
+	// Advance barrier ts.
+	nctx = pipeline.NewNodeContext(
 		cdcContext.NewContext(context.Background(), nil),
 		pipeline.BarrierMessage(2),
 		ch,
 	)
-	err := sn.Receive(nctx)
+	err = sn.Receive(nctx)
 	require.Nil(t, err)
 	require.EqualValues(t, 2, sn.barrierTs)
 	// Barrier message must be passed to the next node.
diff --git a/cdc/processor/processor.go b/cdc/processor/processor.go
index a682b05f53f..cb2a28c9b4e 100644
--- a/cdc/processor/processor.go
+++ b/cdc/processor/processor.go
@@ -919,7 +919,7 @@ func (p *processor) createTablePipelineImpl(ctx cdcContext.Context, tableID mode
 		}
 		markTableID = tableInfo.ID
 		return nil
-	}, retry.WithBackoffMaxDelay(50), retry.WithBackoffMaxDelay(60*1000), retry.WithMaxTries(20))
+	}, retry.WithBackoffBaseDelay(50), retry.WithBackoffMaxDelay(60*1000), retry.WithMaxTries(20))
 	if err != nil {
 		return nil, errors.Trace(err)
 	}
diff --git a/cdc/processor/processor_test.go b/cdc/processor/processor_test.go
index d7c89bd135f..b171473d167 100644
--- a/cdc/processor/processor_test.go
+++ b/cdc/processor/processor_test.go
@@ -76,7 +76,7 @@ func initProcessor4Test(ctx cdcContext.Context, c *check.C) (*processor, *orches
 	p.changefeed = orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID)
 	return p, orchestrator.NewReactorStateTester(c, p.changefeed, map[string]string{
 		"/tidb/cdc/capture/" + ctx.GlobalVars().CaptureInfo.ID: `{"id":"` + ctx.GlobalVars().CaptureInfo.ID + `","address":"127.0.0.1:8300"}`,
-		"/tidb/cdc/changefeed/info/" + ctx.ChangefeedVars().ID: `{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":0,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":".","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"default"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`,
+		"/tidb/cdc/changefeed/info/" + ctx.ChangefeedVars().ID: `{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":0,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":".","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"open-protocol"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`,
 		"/tidb/cdc/job/" + ctx.ChangefeedVars().ID: `{"resolved-ts":0,"checkpoint-ts":0,"admin-job-type":0}`,
 		"/tidb/cdc/task/status/" + ctx.GlobalVars().CaptureInfo.ID + "/" + ctx.ChangefeedVars().ID: `{"tables":{},"operation":null,"admin-job-type":0}`,
 	})
diff --git a/cdc/sink/codec/canal.go b/cdc/sink/codec/canal.go
index 06f518d7568..8a7a28cd4c7 100644
--- a/cdc/sink/codec/canal.go
+++ b/cdc/sink/codec/canal.go
@@ -16,6 +16,7 @@ package codec
 import (
 	"context"
 	"fmt"
+	"math"
 	"strconv"
 	"strings"
 
@@ -24,7 +25,7 @@ import (
 	"github.com/pingcap/log"
 	mm "github.com/pingcap/tidb/parser/model"
 	"github.com/pingcap/tidb/parser/mysql"
-	parser_types "github.com/pingcap/tidb/parser/types"
+	"github.com/pingcap/tidb/types"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/config"
 	cerror "github.com/pingcap/tiflow/pkg/errors"
@@ -60,7 +61,7 @@ func convertRowEventType(e *model.RowChangedEvent) canal.EventType {
 
 // get the canal EventType according to the DDLEvent
 func convertDdlEventType(e *model.DDLEvent) canal.EventType {
-	// see https://github.com/alibaba/canal/blob/d53bfd7ee76f8fe6eb581049d64b07d4fcdd692d/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DruidDdlParser.java
+	// see https://github.com/alibaba/canal/blob/d53bfd7ee76f8fe6eb581049d64b07d4fcdd692d/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DruidDdlParser.java#L59-L178
 	switch e.Type {
 	case mm.ActionCreateSchema, mm.ActionDropSchema, mm.ActionShardRowID, mm.ActionCreateView,
 		mm.ActionDropView, mm.ActionRecoverTable, mm.ActionModifySchemaCharsetAndCollate,
@@ -91,9 +92,7 @@ func convertDdlEventType(e *model.DDLEvent) canal.EventType {
 }
 
 func isCanalDdl(t canal.EventType) bool {
-	// EventType_QUERY is not a ddl type in canal, but in cdc it is.
-	// see https://github.com/alibaba/canal/blob/d53bfd7ee76f8fe6eb581049d64b07d4fcdd692d/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/ddl/DruidDdlParser.java
-	// & https://github.com/alibaba/canal/blob/d53bfd7ee76f8fe6eb581049d64b07d4fcdd692d/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java#L278
+	// see https://github.com/alibaba/canal/blob/b54bea5e3337c9597c427a53071d214ff04628d1/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java#L297
 	switch t {
 	case canal.EventType_CREATE,
 		canal.EventType_RENAME,
 		canal.EventType_CINDEX,
 		canal.EventType_DINDEX,
 		canal.EventType_ALTER,
 		canal.EventType_ERASE,
-		canal.EventType_TRUNCATE:
+		canal.EventType_TRUNCATE,
+		canal.EventType_QUERY:
 		return true
 	}
 	return false
@@ -133,84 +133,163 @@ func (b *canalEntryBuilder) buildHeader(commitTs uint64, schema string, table st
 	return h
 }
 
-// build the Column in the canal RowData
-func (b *canalEntryBuilder) buildColumn(c *model.Column, colName string, updated bool) (*canal.Column, error) {
-	sqlType := MysqlToJavaType(c.Type)
-	mysqlType := parser_types.TypeStr(c.Type)
-	if c.Flag.IsBinary() {
-		if parser_types.IsTypeBlob(c.Type) {
-			mysqlType = strings.Replace(mysqlType, "text", "blob", 1)
-		} else if parser_types.IsTypeChar(c.Type) {
-			mysqlType = strings.Replace(mysqlType, "char", "binary", 1)
+func getJavaSQLType(c *model.Column, mysqlType string) (result JavaSQLType) {
+	javaType := mySQLType2JavaType(c.Type, c.Flag.IsBinary())
+
+	switch javaType {
+	case JavaSQLTypeBINARY, JavaSQLTypeVARBINARY, JavaSQLTypeLONGVARBINARY:
+		if strings.Contains(mysqlType, "text") {
+			return JavaSQLTypeCLOB
 		}
+		return JavaSQLTypeBLOB
+	}
+
+	// the `isUnsigned` flag only applies to the numerical, `bit` and `year` data types.
+	if !c.Flag.IsUnsigned() {
+		return javaType
+	}
+
+	// an unsigned `year` is decoded into `int64`, other unsigned types into `uint64`;
+	// no need to promote the type for `year` and `bit`.
+	if c.Type == mysql.TypeYear || c.Type == mysql.TypeBit {
+		return javaType
+	}
+
+	if c.Type == mysql.TypeFloat || c.Type == mysql.TypeDouble || c.Type == mysql.TypeNewDecimal {
+		return javaType
+	}
+
+	// **unsigned** integral types should hold a value of type `uint64`. see reference:
+	// https://github.com/pingcap/ticdc/blob/f0a38a7aaf9f3b11a4d807da275b567642733f58/cdc/entry/mounter.go#L493
+	// https://github.com/pingcap/tidb/blob/6495a5a116a016a3e077d181b8c8ad81f76ac31b/types/datum.go#L423-L455
+	number, ok := c.Value.(uint64)
+	if !ok {
+		log.Panic("unsigned value not in type uint64", zap.Any("column", c))
 	}
+
 	// Some special cases handled in canal
 	// see https://github.com/alibaba/canal/blob/d53bfd7ee76f8fe6eb581049d64b07d4fcdd692d/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java#L733
+	// For unsigned types, promote by the following rule:
+	// TinyInt,  1 byte, [-128, 127], [0, 255], if a > 127
+	// SmallInt, 2 byte, [-32768, 32767], [0, 65535], if a > 32767
+	// Int,      4 byte, [-2147483648, 2147483647], [0, 4294967295], if a > 2147483647
+	// BigInt,   8 byte, [-2<<63, 2<<63 - 1], [0, 2<<64 - 1], if a > 2<<63 - 1
 	switch c.Type {
-	// Since we cannot get the signed/unsigned flag of the column in the RowChangedEvent currently,
-	// we promote the sqlTypes regardless of the flag.
 	case mysql.TypeTiny:
-		sqlType = JavaSQLTypeSMALLINT
+		if number > math.MaxInt8 {
+			javaType = JavaSQLTypeSMALLINT
+		}
 	case mysql.TypeShort:
-		sqlType = JavaSQLTypeINTEGER
-	case mysql.TypeInt24:
-		sqlType = JavaSQLTypeINTEGER
+		if number > math.MaxInt16 {
+			javaType = JavaSQLTypeINTEGER
+		}
 	case mysql.TypeLong:
-		sqlType = JavaSQLTypeBIGINT
+		if number > math.MaxInt32 {
+			javaType = JavaSQLTypeBIGINT
+		}
 	case mysql.TypeLonglong:
-		sqlType = JavaSQLTypeDECIMAL
-	}
-	switch sqlType {
-	case JavaSQLTypeBINARY, JavaSQLTypeVARBINARY, JavaSQLTypeLONGVARBINARY:
-		if c.Flag.IsBinary() {
-			sqlType = JavaSQLTypeBLOB
-		} else {
-			// In jdbc, text type is mapping to JavaSQLTypeVARCHAR
-			// see https://dev.mysql.com/doc/connector-j/5.1/en/connector-j-reference-type-conversions.html
-			sqlType = JavaSQLTypeVARCHAR
+		if number > math.MaxInt64 {
+			javaType = JavaSQLTypeDECIMAL
 		}
 	}
-	isKey := c.Flag.IsPrimaryKey()
-	isNull := c.Value == nil
-	value := ""
-	if !isNull {
-		switch v := c.Value.(type) {
-		case int64:
-			value = strconv.FormatInt(v, 10)
-		case uint64:
-			value = strconv.FormatUint(v, 10)
-		case float32:
-			value = strconv.FormatFloat(float64(v), 'f', -1, 32)
-		case float64:
-			value = strconv.FormatFloat(v, 'f', -1, 64)
-		case string:
-			value = v
-		case []byte:
-			// special handle for text and blob
-			// see https://github.com/alibaba/canal/blob/9f6021cf36f78cc8ac853dcf37a1769f359b868b/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java#L801
-			switch sqlType {
-			case JavaSQLTypeVARCHAR, JavaSQLTypeCHAR:
-				value = string(v)
-			default:
-				decoded, err := b.bytesDecoder.Bytes(v)
-				if err != nil {
-					return nil, cerror.WrapError(cerror.ErrCanalDecodeFailed, err)
-				}
-				value = string(decoded)
-				sqlType = JavaSQLTypeBLOB // change sql type to Blob when the type is []byte according to canal
-			}
+	return javaType
+}
+
+// In the official canal-json implementation, values are extracted from the binlog buffer.
+// see https://github.com/alibaba/canal/blob/b54bea5e3337c9597c427a53071d214ff04628d1/dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java#L276-L1147
+// all values are represented as strings
+// see https://github.com/alibaba/canal/blob/b54bea5e3337c9597c427a53071d214ff04628d1/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java#L760-L855
+func (b *canalEntryBuilder) formatValue(value interface{}, javaType JavaSQLType) (result string, err error) {
+	// value would be nil if no value was inserted for the column.
+	if value == nil {
+		return "", nil
+	}
+
+	switch v := value.(type) {
+	case int64:
+		result = strconv.FormatInt(v, 10)
+	case uint64:
+		result = strconv.FormatUint(v, 10)
+	case float32:
+		result = strconv.FormatFloat(float64(v), 'f', -1, 32)
+	case float64:
+		result = strconv.FormatFloat(v, 'f', -1, 64)
+	case string:
+		result = v
+	case []byte:
+		// JavaSQLTypeVARCHAR / JavaSQLTypeCHAR / JavaSQLTypeBLOB / JavaSQLTypeCLOB /
+		// special handling for text and blob
+		// see https://github.com/alibaba/canal/blob/9f6021cf36f78cc8ac853dcf37a1769f359b868b/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java#L801
+		switch javaType {
+		// for normal text
+		case JavaSQLTypeVARCHAR, JavaSQLTypeCHAR, JavaSQLTypeCLOB:
+			result = string(v)
+		default:
+			// JavaSQLTypeBLOB
+			decoded, err := b.bytesDecoder.Bytes(v)
+			if err != nil {
+				return "", err
+			}
+			result = string(decoded)
+		}
+	default:
+		result = fmt.Sprintf("%v", v)
+	}
+	return result, nil
+}
+
+// when encoding the canal format, add the `unsigned` keyword for unsigned mysql types.
+// it should have the form `t unsigned`, such as `int unsigned`
+func withUnsigned4MySQLType(mysqlType string, unsigned bool) string {
+	if unsigned && mysqlType != "bit" && mysqlType != "year" {
+		return mysqlType + " unsigned"
+	}
+	return mysqlType
+}
+
+// when decoding the canal format, remove `unsigned` to get the original `mysql type`.
+func trimUnsignedFromMySQLType(mysqlType string) string {
+	return strings.TrimSuffix(mysqlType, " unsigned")
+}
+
+func getMySQLType(c *model.Column) string {
+	mysqlType := types.TypeStr(c.Type)
+	// keep the `mysqlType` representation identical to the canal official implementation
+	mysqlType = withUnsigned4MySQLType(mysqlType, c.Flag.IsUnsigned())
+
+	if !c.Flag.IsBinary() {
+		return mysqlType
+	}
+
+	if types.IsTypeBlob(c.Type) {
+		return strings.Replace(mysqlType, "text", "blob", 1)
+	}
+
+	if types.IsTypeChar(c.Type) {
+		return strings.Replace(mysqlType, "char", "binary", 1)
+	}
+
+	return mysqlType
+}
+
+// build the Column in the canal RowData
+// see https://github.com/alibaba/canal/blob/b54bea5e3337c9597c427a53071d214ff04628d1/parse/src/main/java/com/alibaba/otter/canal/parse/inbound/mysql/dbsync/LogEventConvert.java#L756-L872
+func (b *canalEntryBuilder) buildColumn(c *model.Column, colName string, updated bool) (*canal.Column, error) {
+	mysqlType := getMySQLType(c)
+	javaType := getJavaSQLType(c, mysqlType)
+
+	value, err := b.formatValue(c.Value, javaType)
+	if err != nil {
+		return nil, cerror.WrapError(cerror.ErrCanalEncodeFailed, err)
 	}
 
 	canalColumn := &canal.Column{
-		SqlType:       int32(sqlType),
+		SqlType:       int32(javaType),
 		Name:          colName,
-		IsKey:         isKey,
+		IsKey:         c.Flag.IsPrimaryKey(),
 		Updated:       updated,
-		IsNullPresent: &canal.Column_IsNull{IsNull: isNull},
+		IsNullPresent: &canal.Column_IsNull{IsNull: c.Value == nil},
 		Value:         value,
 		MysqlType:     mysqlType,
 	}
diff --git a/cdc/sink/codec/canal_flat.go b/cdc/sink/codec/canal_flat.go
index dd52919e2cd..b369215c709 100644
--- a/cdc/sink/codec/canal_flat.go
+++ b/cdc/sink/codec/canal_flat.go
@@ -82,9 +82,10 @@ type canalFlatMessageInterface interface {
 	getOld() map[string]interface{}
 	getData() map[string]interface{}
 	getMySQLType() map[string]string
+	getJavaSQLType() map[string]int32
 }
 
-// adapted from https://github.com/alibaba/canal/blob/master/protocol/src/main/java/com/alibaba/otter/canal/protocol/FlatMessage.java
+// adapted from https://github.com/alibaba/canal/blob/b54bea5e3337c9597c427a53071d214ff04628d1/protocol/src/main/java/com/alibaba/otter/canal/protocol/FlatMessage.java#L1
 type canalFlatMessage struct {
 	// ignored by consumers
 	ID int64 `json:"id"`
@@ -132,10 +133,16 @@ func (c *canalFlatMessage) getQuery() string {
 }
 
 func (c *canalFlatMessage) getOld() map[string]interface{} {
+	if c.Old == nil {
+		return nil
+	}
 	return c.Old[0]
 }
 
 func (c *canalFlatMessage) getData() map[string]interface{} {
+	if c.Data == nil {
+		return nil
+	}
 	return c.Data[0]
 }
 
@@ -143,6 +150,10 @@ func (c *canalFlatMessage) getMySQLType() map[string]string {
 	return c.MySQLType
 }
 
+func (c *canalFlatMessage) getJavaSQLType() map[string]int32 {
+	return c.SQLType
+}
+
 type tidbExtension struct {
 	CommitTs    uint64 `json:"commitTs,omitempty"`
 	WatermarkTs uint64 `json:"watermarkTs,omitempty"`
@@ -157,38 +168,10 @@ type canalFlatMessageWithTiDBExtension struct {
 	Extensions *tidbExtension `json:"_tidb"`
 }
 
-func (c *canalFlatMessageWithTiDBExtension) getTikvTs() uint64 {
-	return c.tikvTs
-}
-
-func (c *canalFlatMessageWithTiDBExtension) getSchema() *string {
-	return &c.Schema
-}
-
-func (c *canalFlatMessageWithTiDBExtension) getTable() *string {
-	return &c.Table
-}
-
 func (c *canalFlatMessageWithTiDBExtension) getCommitTs() uint64 {
 	return c.Extensions.CommitTs
 }
 
-func (c *canalFlatMessageWithTiDBExtension) getQuery() string {
-	return c.Query
-}
-
-func (c *canalFlatMessageWithTiDBExtension) getOld() map[string]interface{} {
-	return c.Old[0]
-}
-
-func (c *canalFlatMessageWithTiDBExtension) getData() map[string]interface{} {
-	return c.Data[0]
-}
-
-func (c *canalFlatMessageWithTiDBExtension) getMySQLType() map[string]string {
-	return c.MySQLType
-}
-
 func (c *CanalFlatEventBatchEncoder) newFlatMessageForDML(e *model.RowChangedEvent) (canalFlatMessageInterface, error) {
 	eventType := convertRowEventType(e)
 	header := c.builder.buildHeader(e.CommitTs, e.Table.Schema, e.Table.Table, eventType, 1)
@@ -242,10 +225,6 @@ func (c *CanalFlatEventBatchEncoder) newFlatMessageForDML(e *model.RowChangedEve
 			data[rowData.AfterColumns[i].Name] = nil
 		}
 	}
-	} else {
-		// The event type is DELETE
-		// The following line is important because Alibaba's adapter expects this, and so does Flink.
-		data = oldData
 	}
 
 	flatMessage := &canalFlatMessage{
@@ -261,14 +240,20 @@ func (c *CanalFlatEventBatchEncoder) newFlatMessageForDML(e *model.RowChangedEve
 		SQLType:   sqlType,
 		MySQLType: mysqlType,
 		Data:      make([]map[string]interface{}, 0),
-		Old:       make([]map[string]interface{}, 0),
+		Old:       nil,
 		tikvTs:    e.CommitTs,
 	}
 
-	// We need to ensure that both Data and Old have exactly one element,
-	// even if the element could be nil. Changing this could break Alibaba's adapter
-	flatMessage.Data = append(flatMessage.Data, data)
-	flatMessage.Old = append(flatMessage.Old, oldData)
+	if e.IsDelete() {
+		flatMessage.Data = append(flatMessage.Data, oldData)
+	} else if e.IsInsert() {
+		flatMessage.Data = append(flatMessage.Data, data)
+	} else if e.IsUpdate() {
+		flatMessage.Old = []map[string]interface{}{oldData}
+		flatMessage.Data = append(flatMessage.Data, data)
+	} else {
+		log.Panic("unreachable event type", zap.Any("event", e))
+	}
 
 	if !c.enableTiDBExtension {
 		return flatMessage, nil
@@ -509,11 +494,11 @@ func canalFlatMessage2RowChangedEvent(flatMessage canalFlatMessageInterface) (*m
 	}
 
 	var err error
-	result.Columns, err = canalFlatJSONColumnMap2SinkColumns(flatMessage.getData(), flatMessage.getMySQLType())
+	result.Columns, err = canalFlatJSONColumnMap2SinkColumns(flatMessage.getData(), flatMessage.getMySQLType(), flatMessage.getJavaSQLType())
 	if err != nil {
 		return nil, err
 	}
-	result.PreColumns, err = canalFlatJSONColumnMap2SinkColumns(flatMessage.getOld(), flatMessage.getMySQLType())
+	result.PreColumns, err = canalFlatJSONColumnMap2SinkColumns(flatMessage.getOld(), flatMessage.getMySQLType(), flatMessage.getJavaSQLType())
 	if err != nil {
 		return nil, err
 	}
@@ -521,21 +506,24 @@ func canalFlatMessage2RowChangedEvent(flatMessage canalFlatMessageInterface) (*m
 	return result, nil
 }
 
-func canalFlatJSONColumnMap2SinkColumns(cols map[string]interface{}, mysqlType map[string]string) ([]*model.Column, error) {
+func canalFlatJSONColumnMap2SinkColumns(cols map[string]interface{}, mysqlType map[string]string, javaSQLType map[string]int32) ([]*model.Column, error) {
 	result := make([]*model.Column, 0, len(cols))
 	for name, value := range cols {
-		typeStr, ok := mysqlType[name]
+		javaType, ok := javaSQLType[name]
 		if !ok {
-			// this should not happen, else we have to check encoding for mysqlType.
+			// this should not happen, else we have to check encoding for javaSQLType.
 			return nil, cerrors.ErrCanalDecodeFailed.GenWithStack(
-				"mysql type does not found, column: %+v, mysqlType: %+v", name, mysqlType)
+				"java sql type not found, column: %+v, javaSQLType: %+v", name, javaSQLType)
 		}
-		tp := types.StrToType(typeStr)
+		mysqlTypeStr, ok := mysqlType[name]
 		if !ok {
+			// this should not happen, else we have to check encoding for mysqlType.
 			return nil, cerrors.ErrCanalDecodeFailed.GenWithStack(
-				"mysql type does not found, column: %+v, type: %+v", name, tp)
+				"mysql type not found, column: %+v, mysqlType: %+v", name, mysqlType)
 		}
-		col := NewColumn(value, tp).ToSinkColumn(name)
+		mysqlTypeStr = trimUnsignedFromMySQLType(mysqlTypeStr)
+		mysqlType := types.StrToType(mysqlTypeStr)
+		col := NewColumn(value, mysqlType).decodeCanalJSONColumn(name, JavaSQLType(javaType))
 		result = append(result, col)
 	}
 	if len(result) == 0 {
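With the branch added above, newFlatMessageForDML now fills data and old the way canal-json consumers expect: inserts and deletes carry exactly one row in data and leave old as JSON null, while updates carry the post-image in data and the pre-image in old. Expressed as Go literals against the canalFlatMessage struct from this file (the column values are invented for illustration; every value is a string because formatValue stringifies all column data):

    // INSERT: new row in Data, Old omitted.
    insert := canalFlatMessage{EventType: "INSERT",
        Data: []map[string]interface{}{{"id": "1", "name": "Bob"}}, Old: nil}

    // UPDATE: post-image in Data, pre-image in Old.
    update := canalFlatMessage{EventType: "UPDATE",
        Data: []map[string]interface{}{{"id": "1", "name": "Bob"}},
        Old:  []map[string]interface{}{{"id": "1", "name": "Alice"}}}

    // DELETE: deleted row in Data, Old omitted.
    del := canalFlatMessage{EventType: "DELETE",
        Data: []map[string]interface{}{{"id": "1", "name": "Alice"}}, Old: nil}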
diff --git a/cdc/sink/codec/canal_flat_test.go b/cdc/sink/codec/canal_flat_test.go
index b07490b33bf..25fa8e7aac4 100644
--- a/cdc/sink/codec/canal_flat_test.go
+++ b/cdc/sink/codec/canal_flat_test.go
@@ -18,7 +18,6 @@ import (
 
 	"github.com/pingcap/check"
 	mm "github.com/pingcap/tidb/parser/model"
-	"github.com/pingcap/tidb/parser/mysql"
 	"github.com/pingcap/tiflow/cdc/model"
 	"github.com/pingcap/tiflow/pkg/util/testleak"
 	"golang.org/x/text/encoding/charmap"
@@ -28,31 +27,39 @@ type canalFlatSuite struct{}
 
 var _ = check.Suite(&canalFlatSuite{})
 
-var testCaseUpdate = &model.RowChangedEvent{
-	CommitTs: 417318403368288260,
-	Table: &model.TableName{
-		Schema: "cdc",
-		Table:  "person",
-	},
-	Columns: []*model.Column{
-		{Name: "id", Type: mysql.TypeLong, Flag: model.PrimaryKeyFlag, Value: 1},
-		{Name: "name", Type: mysql.TypeVarchar, Value: "Bob"},
-		{Name: "tiny", Type: mysql.TypeTiny, Value: 255},
-		{Name: "comment", Type: mysql.TypeBlob, Value: []byte("测试")},
-		{Name: "blob", Type: mysql.TypeBlob, Value: []byte("测试blob"), Flag: model.BinaryFlag},
-		{Name: "binaryString", Type: mysql.TypeString, Value: "Chengdu International Airport", Flag: model.BinaryFlag},
-		{Name: "binaryBlob", Type: mysql.TypeVarchar, Value: []byte("你好,世界"), Flag: model.BinaryFlag},
-	},
-	PreColumns: []*model.Column{
-		{Name: "id", Type: mysql.TypeLong, Flag: model.HandleKeyFlag, Value: 1},
-		{Name: "name", Type: mysql.TypeVarchar, Value: "Alice"},
-		{Name: "tiny", Type: mysql.TypeTiny, Value: 255},
-		{Name: "comment", Type: mysql.TypeBlob, Value: []byte("测试")},
-		{Name: "blob", Type: mysql.TypeBlob, Value: []byte("测试blob"), Flag: model.BinaryFlag},
-		{Name: "binaryString", Type: mysql.TypeString, Value: "Chengdu International Airport", Flag: model.BinaryFlag},
-		{Name: "binaryBlob", Type: mysql.TypeVarchar, Value: []byte("你好,世界"), Flag: model.BinaryFlag},
-	},
-}
+var (
+	testColumns = collectAllColumns(testColumnsTable)
+
+	testCaseInsert = &model.RowChangedEvent{
+		CommitTs: 417318403368288260,
+		Table: &model.TableName{
+			Schema: "cdc",
+			Table:  "person",
+		},
+		Columns:    testColumns,
+		PreColumns: nil,
+	}
+
+	testCaseUpdate = &model.RowChangedEvent{
+		CommitTs: 417318403368288260,
+		Table: &model.TableName{
+			Schema: "cdc",
+			Table:  "person",
+		},
+		Columns:    testColumns,
+		PreColumns: testColumns,
+	}
+
+	testCaseDelete = &model.RowChangedEvent{
+		CommitTs: 417318403368288260,
+		Table: &model.TableName{
+			Schema: "cdc",
+			Table:  "person",
+		},
+		Columns:    nil,
+		PreColumns: testColumns,
+	}
+)
 
 var testCaseDDL = &model.DDLEvent{
 	CommitTs: 417318403368288260,
@@ -76,63 +83,61 @@ func (s *canalFlatSuite) TestSetParams(c *check.C) {
 	c.Assert(encoder.enableTiDBExtension, check.IsTrue)
 }
 
-func (s *canalFlatSuite) TestNewCanalFlatMessageFromDML(c *check.C) {
+func (s *canalFlatSuite) TestNewCanalFlatMessage4DML(c *check.C) {
 	defer testleak.AfterTest(c)()
 	encoder := &CanalFlatEventBatchEncoder{builder: NewCanalEntryBuilder()}
 	c.Assert(encoder, check.NotNil)
-	message, err := encoder.newFlatMessageForDML(testCaseUpdate)
+
+	message, err := encoder.newFlatMessageForDML(testCaseInsert)
 	c.Assert(err, check.IsNil)
+	flatMessage, ok := message.(*canalFlatMessage)
+	c.Assert(ok, check.IsTrue)
+	c.Assert(flatMessage.Data, check.NotNil)
+	c.Assert(flatMessage.Old, check.IsNil)
+	c.Assert(flatMessage.EventType, check.Equals, "INSERT")
+	c.Assert(flatMessage.ExecutionTime, check.Equals, convertToCanalTs(testCaseInsert.CommitTs))
+	c.Assert(flatMessage.tikvTs, check.Equals, testCaseInsert.CommitTs)
+	c.Assert(flatMessage.Schema, check.Equals, "cdc")
+	c.Assert(flatMessage.Table, check.Equals, "person")
+	c.Assert(flatMessage.IsDDL, check.IsFalse)
+
+	// check that the data contains all the expected columns
+	obtainedDataMap := flatMessage.getData()
+	c.Assert(obtainedDataMap, check.NotNil)
+
+	for _, item := range testColumnsTable {
+		obtainedValue, ok := obtainedDataMap[item.column.Name]
+		c.Assert(ok, check.IsTrue)
+		if !item.column.Flag.IsBinary() {
+			c.Assert(obtainedValue, check.Equals, item.expectedValue)
+			continue
+		}
 
-	msg, ok := message.(*canalFlatMessage)
+		if bytes, ok := item.column.Value.([]byte); ok {
+			expectedValue, err := charmap.ISO8859_1.NewDecoder().Bytes(bytes)
+			c.Assert(err, check.IsNil)
+			c.Assert(obtainedValue, check.Equals, string(expectedValue))
+			continue
+		}
+
+		c.Assert(obtainedValue, check.Equals, item.expectedValue)
+	}
+
+	message, err = encoder.newFlatMessageForDML(testCaseUpdate)
+	c.Assert(err, check.IsNil)
+	flatMessage, ok = message.(*canalFlatMessage)
 	c.Assert(ok, check.IsTrue)
-	c.Assert(msg.EventType, check.Equals, "UPDATE")
-	c.Assert(msg.ExecutionTime, check.Equals, convertToCanalTs(testCaseUpdate.CommitTs))
-	c.Assert(msg.tikvTs, check.Equals, testCaseUpdate.CommitTs)
-	c.Assert(msg.Schema, check.Equals, "cdc")
-	c.Assert(msg.Table, check.Equals, "person")
-	c.Assert(msg.IsDDL, check.IsFalse)
-	c.Assert(msg.SQLType, check.DeepEquals, map[string]int32{
-		"id":           int32(JavaSQLTypeBIGINT),
-		"name":         int32(JavaSQLTypeVARCHAR),
-		"tiny":         int32(JavaSQLTypeSMALLINT),
-		"comment":      int32(JavaSQLTypeVARCHAR),
-		"blob":         int32(JavaSQLTypeBLOB),
-		"binaryString": int32(JavaSQLTypeCHAR),
-		"binaryBlob":   int32(JavaSQLTypeVARCHAR),
-	})
-	c.Assert(msg.MySQLType, check.DeepEquals, map[string]string{
-		"id":           "int",
-		"name":         "varchar",
-		"tiny":         "tinyint",
-		"comment":      "text",
-		"blob":         "blob",
-		"binaryString": "binary",
-		"binaryBlob":   "varbinary",
-	})
-	encodedBytes, err := charmap.ISO8859_1.NewDecoder().Bytes([]byte("测试blob"))
+	c.Assert(flatMessage.Data, check.NotNil)
+	c.Assert(flatMessage.Old, check.NotNil)
+	c.Assert(flatMessage.EventType, check.Equals, "UPDATE")
+
+	message, err = encoder.newFlatMessageForDML(testCaseDelete)
 	c.Assert(err, check.IsNil)
-	c.Assert(msg.Data, check.DeepEquals, []map[string]interface{}{
-		{
-			"id":           "1",
-			"name":         "Bob",
-			"tiny":         "255",
-			"comment":      "测试",
-			"blob":         string(encodedBytes),
-			"binaryString": "Chengdu International Airport",
-			"binaryBlob":   "你好,世界",
-		},
-	})
-	c.Assert(msg.Old, check.DeepEquals, []map[string]interface{}{
-		{
-			"id":           "1",
-			"name":         "Alice",
-			"tiny":         "255",
-			"comment":      "测试",
-			"blob":         string(encodedBytes),
-			"binaryString": "Chengdu International Airport",
-			"binaryBlob":   "你好,世界",
-		},
-	})
+	flatMessage, ok = message.(*canalFlatMessage)
+	c.Assert(ok, check.IsTrue)
+	c.Assert(flatMessage.Data, check.NotNil)
+	c.Assert(flatMessage.Old, check.IsNil)
+	c.Assert(flatMessage.EventType, check.Equals, "DELETE")
 
 	encoder = &CanalFlatEventBatchEncoder{builder: NewCanalEntryBuilder(), enableTiDBExtension: true}
 	c.Assert(encoder, check.NotNil)
@@ -149,23 +154,12 @@ func (s *canalFlatSuite) TestNewCanalFlatMessage4DML(c *check.C) {
 
 func (s *canalFlatSuite) TestNewCanalFlatEventBatchDecoder4RowMessage(c *check.C) {
 	defer testleak.AfterTest(c)()
-	encodedBytes, err := charmap.ISO8859_1.NewDecoder().Bytes([]byte("测试blob"))
-	c.Assert(err, check.IsNil)
-	expected := map[string]interface{}{
-		"id":           "1",
-		"name":         "Bob",
-		"tiny":         "255",
-		"comment":      "测试",
-		"blob":         string(encodedBytes),
-		"binaryString": "Chengdu International Airport",
-		"binaryBlob":   "你好,世界",
-	}
-
+	expectedDecodedValues := collectDecodeValueByColumns(testColumnsTable)
 	for _, encodeEnable := range []bool{false, true} {
 		encoder := &CanalFlatEventBatchEncoder{builder: NewCanalEntryBuilder(), enableTiDBExtension: encodeEnable}
 		c.Assert(encoder, check.NotNil)
 
-		result, err := encoder.AppendRowChangedEvent(testCaseUpdate)
+		result, err := encoder.AppendRowChangedEvent(testCaseInsert)
 		c.Assert(err, check.IsNil)
 		c.Assert(result, check.Equals, EncoderNoOperation)
 
@@ -190,24 +184,19 @@ func (s *canalFlatSuite) TestNewCanalFlatEventBatchDecoder4RowMessage(c *check.C
 
 		consumed, err := decoder.NextRowChangedEvent()
 		c.Assert(err, check.IsNil)
-		c.Assert(consumed.Table, check.DeepEquals, testCaseUpdate.Table)
+		c.Assert(consumed.Table, check.DeepEquals, testCaseInsert.Table)
 		if encodeEnable && decodeEnable {
-			c.Assert(consumed.CommitTs, check.Equals, testCaseUpdate.CommitTs)
+			c.Assert(consumed.CommitTs, check.Equals, testCaseInsert.CommitTs)
 		} else {
 			c.Assert(consumed.CommitTs, check.Equals, uint64(0))
 		}
 
 		for _, col := range consumed.Columns {
-			value, ok := expected[col.Name]
+			expected, ok := expectedDecodedValues[col.Name]
 			c.Assert(ok, check.IsTrue)
-			if val, ok := col.Value.([]byte); ok {
-				c.Assert(string(val), check.Equals, value)
-			} else {
-				c.Assert(col.Value, check.Equals, value)
-			}
-
-			for _, item := range testCaseUpdate.Columns {
+			c.Assert(col.Value, check.Equals, expected)
+			for _, item := range testCaseInsert.Columns {
 				if item.Name == col.Name {
 					c.Assert(col.Type, check.Equals, item.Type)
 				}
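The rewritten tests below lean on a shared table (testColumnsTable) that, among other things, pins down the unsigned-promotion rule implemented by getJavaSQLType: an unsigned integer column keeps its ordinary Java SQL type until its value exceeds the signed maximum for that width, and only then is promoted to the next wider type. A standalone restatement of the rule (promoteUnsigned is a hypothetical helper written for illustration, not code from this patch):

    // promoteUnsigned mirrors the switch in getJavaSQLType for unsigned integer columns.
    func promoteUnsigned(t byte, v uint64) JavaSQLType {
        javaType := mySQLType2JavaType(t, false)
        switch t {
        case mysql.TypeTiny: // 128..255 no longer fits in a signed 8-bit value
            if v > math.MaxInt8 {
                javaType = JavaSQLTypeSMALLINT
            }
        case mysql.TypeShort:
            if v > math.MaxInt16 {
                javaType = JavaSQLTypeINTEGER
            }
        case mysql.TypeLong:
            if v > math.MaxInt32 {
                javaType = JavaSQLTypeBIGINT
            }
        case mysql.TypeLonglong:
            if v > math.MaxInt64 {
                javaType = JavaSQLTypeDECIMAL
            }
        }
        return javaType
    }

    // e.g. promoteUnsigned(mysql.TypeTiny, 127) == JavaSQLTypeTINYINT,
    //      promoteUnsigned(mysql.TypeTiny, 128) == JavaSQLTypeSMALLINT.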
diff --git a/cdc/sink/codec/canal_test.go b/cdc/sink/codec/canal_test.go
index bf8f0fa113d..8b6354a67dd 100644
--- a/cdc/sink/codec/canal_test.go
+++ b/cdc/sink/codec/canal_test.go
@@ -31,56 +31,53 @@ type canalBatchSuite struct {
 }
 
 var _ = check.Suite(&canalBatchSuite{
-	rowCases: [][]*model.RowChangedEvent{{{
-		CommitTs: 1,
-		Table:    &model.TableName{Schema: "a", Table: "b"},
-		Columns:  []*model.Column{{Name: "col1", Type: 1, Value: "aa"}},
-	}}, {{
-		CommitTs: 1,
-		Table:    &model.TableName{Schema: "a", Table: "b"},
-		Columns:  []*model.Column{{Name: "col1", Type: 1, Value: "aa"}},
-	}, {
-		CommitTs: 2,
-		Table:    &model.TableName{Schema: "a", Table: "b"},
-		Columns:  []*model.Column{{Name: "col1", Type: 1, Value: "bb"}},
-	}, {
-		CommitTs: 3,
-		Table:    &model.TableName{Schema: "a", Table: "b"},
-		Columns:  []*model.Column{{Name: "col1", Type: 1, Value: "bb"}},
-	}, {
-		CommitTs: 4,
-		Table:    &model.TableName{Schema: "a", Table: "c", TableID: 6, IsPartition: true},
-		Columns:  []*model.Column{{Name: "col1", Type: 1, Value: "cc"}},
-	}}, {}},
+	rowCases: [][]*model.RowChangedEvent{
+		{{
+			CommitTs: 1,
+			Table:    &model.TableName{Schema: "a", Table: "b"},
+			Columns:  []*model.Column{{Name: "col1", Type: 1, Value: "aa"}},
+		}},
+		{
+			{
+				CommitTs: 1,
+				Table:    &model.TableName{Schema: "a", Table: "b"},
+				Columns:  []*model.Column{{Name: "col1", Type: 1, Value: "aa"}},
+			},
+			{
+				CommitTs: 2,
+				Table:    &model.TableName{Schema: "a", Table: "b"},
+				Columns:  []*model.Column{{Name: "col1", Type: 1, Value: "bb"}},
+			},
+		},
+	},
-	ddlCases: [][]*model.DDLEvent{{{
-		CommitTs: 1,
-		TableInfo: &model.SimpleTableInfo{
-			Schema: "a", Table: "b",
-		},
-		Query: "create table a",
-		Type:  1,
-	}}, {{
-		CommitTs: 1,
-		TableInfo: &model.SimpleTableInfo{
-			Schema: "a", Table: "b",
-		},
-		Query: "create table a",
-		Type:  1,
-	}, {
-		CommitTs: 2,
-		TableInfo: &model.SimpleTableInfo{
-			Schema: "a", Table: "b",
-		},
-		Query: "create table b",
-		Type:  2,
-	}, {
-		CommitTs: 3,
-		TableInfo: &model.SimpleTableInfo{
-			Schema: "a", Table: "b",
-		},
-		Query: "create table c",
-		Type:  3,
-	}}, {}},
+	ddlCases: [][]*model.DDLEvent{
+		{{
+			CommitTs: 1,
+			TableInfo: &model.SimpleTableInfo{
+				Schema: "a", Table: "b",
+			},
+			Query: "create table a",
+			Type:  1,
+		}},
+		{
+			{
+				CommitTs: 2,
+				TableInfo: &model.SimpleTableInfo{
+					Schema: "a", Table: "b",
+				},
+				Query: "create table b",
+				Type:  3,
+			},
+			{
+				CommitTs: 3,
+				TableInfo: &model.SimpleTableInfo{
+					Schema: "a", Table: "b",
+				},
+				Query: "create table c",
+				Type:  3,
+			},
+		},
+	},
 })
 
 func (s *canalBatchSuite) TestCanalEventBatchEncoder(c *check.C) {
@@ -187,7 +184,7 @@ func testInsert(c *check.C) {
 			c.Assert(col.GetUpdated(), check.IsTrue)
 			switch col.GetName() {
 			case "id":
-				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeBIGINT))
+				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeINTEGER))
 				c.Assert(col.GetIsKey(), check.IsTrue)
 				c.Assert(col.GetIsNull(), check.IsFalse)
 				c.Assert(col.GetValue(), check.Equals, "1")
@@ -199,12 +196,12 @@ func testInsert(c *check.C) {
 				c.Assert(col.GetValue(), check.Equals, "Bob")
 				c.Assert(col.GetMysqlType(), check.Equals, "varchar")
 			case "tiny":
-				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeSMALLINT))
+				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeTINYINT))
 				c.Assert(col.GetIsKey(), check.IsFalse)
 				c.Assert(col.GetIsNull(), check.IsFalse)
 				c.Assert(col.GetValue(), check.Equals, "255")
 			case "comment":
-				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeVARCHAR))
+				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeCLOB))
 				c.Assert(col.GetIsKey(), check.IsFalse)
 				c.Assert(col.GetIsNull(), check.IsFalse)
 				c.Assert(err, check.IsNil)
@@ -264,7 +261,7 @@ func testUpdate(c *check.C) {
 			c.Assert(col.GetUpdated(), check.IsTrue)
 			switch col.GetName() {
 			case "id":
-				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeBIGINT))
+				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeINTEGER))
 				c.Assert(col.GetIsKey(), check.IsTrue)
 				c.Assert(col.GetIsNull(), check.IsFalse)
 				c.Assert(col.GetValue(), check.Equals, "2")
@@ -284,7 +281,7 @@ func testUpdate(c *check.C) {
 			c.Assert(col.GetUpdated(), check.IsTrue)
 			switch col.GetName() {
 			case "id":
-				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeBIGINT))
+				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeINTEGER))
 				c.Assert(col.GetIsKey(), check.IsTrue)
 				c.Assert(col.GetIsNull(), check.IsFalse)
 				c.Assert(col.GetValue(), check.Equals, "1")
@@ -334,7 +331,7 @@ func testDelete(c *check.C) {
 			c.Assert(col.GetUpdated(), check.IsFalse)
 			switch col.GetName() {
 			case "id":
-				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeBIGINT))
+				c.Assert(col.GetSqlType(), check.Equals, int32(JavaSQLTypeINTEGER))
 				c.Assert(col.GetIsKey(), check.IsTrue)
 				c.Assert(col.GetIsNull(), check.IsFalse)
 				c.Assert(col.GetValue(), check.Equals, "1")
@@ -368,3 +365,95 @@ func testDdl(c *check.C) {
 	c.Assert(rc.GetIsDdl(), check.IsTrue)
 	c.Assert(rc.GetDdlSchemaName(), check.Equals, testCaseDdl.TableInfo.Schema)
 }
+
+type testColumnTuple struct {
+	column              *model.Column
+	expectedMySQLType   string
+	expectedJavaSQLType JavaSQLType
+	// expectedValue is expected by both encoding and decoding
+	expectedValue string
+}
+
+func collectAllColumns(groups []*testColumnTuple) []*model.Column {
+	result := make([]*model.Column, 0, len(groups))
+	for _, item := range groups {
+		result = append(result, item.column)
+	}
+	return result
+}
+
+func collectDecodeValueByColumns(columns []*testColumnTuple) map[string]string {
+	result := make(map[string]string, len(columns))
+	for _, item := range columns {
+		result[item.column.Name] = item.expectedValue
+	}
+	return result
+}
+
+var testColumnsTable = []*testColumnTuple{
+	{&model.Column{Name: "tinyint", Type: mysql.TypeTiny, Value: int64(127)}, "tinyint", JavaSQLTypeTINYINT, "127"}, // TinyInt
+	{&model.Column{Name: "tinyint unsigned", Type: mysql.TypeTiny, Value: uint64(127), Flag: model.UnsignedFlag}, "tinyint unsigned", JavaSQLTypeTINYINT, "127"},
+	{&model.Column{Name: "tinyint unsigned 2", Type: mysql.TypeTiny, Value: uint64(128), Flag: model.UnsignedFlag}, "tinyint unsigned", JavaSQLTypeSMALLINT, "128"},
+
+	{&model.Column{Name: "smallint", Type: mysql.TypeShort, Value: int64(32767)}, "smallint", JavaSQLTypeSMALLINT, "32767"},
+	{&model.Column{Name: "smallint unsigned", Type: mysql.TypeShort, Value: uint64(32767), Flag: model.UnsignedFlag}, "smallint unsigned", JavaSQLTypeSMALLINT, "32767"},
+	{&model.Column{Name: "smallint unsigned 2", Type: mysql.TypeShort, Value: uint64(32768), Flag: model.UnsignedFlag}, "smallint unsigned", JavaSQLTypeINTEGER, "32768"},
+
+	{&model.Column{Name: "mediumint", Type: mysql.TypeInt24, Value: int64(8388607)}, "mediumint", JavaSQLTypeINTEGER, "8388607"},
+	{&model.Column{Name: "mediumint unsigned", Type: mysql.TypeInt24, Value: uint64(8388607), Flag: model.UnsignedFlag}, "mediumint unsigned", JavaSQLTypeINTEGER, "8388607"},
+	{&model.Column{Name: "mediumint unsigned 2", Type: mysql.TypeInt24, Value: uint64(8388608), Flag: model.UnsignedFlag}, "mediumint unsigned", JavaSQLTypeINTEGER, "8388608"},
+
+	{&model.Column{Name: "int", Type: mysql.TypeLong, Value: int64(2147483647)}, "int", JavaSQLTypeINTEGER, "2147483647"},
+	{&model.Column{Name: "int unsigned", Type: mysql.TypeLong, Value: uint64(2147483647), Flag: model.UnsignedFlag}, "int unsigned", JavaSQLTypeINTEGER, "2147483647"},
+	{&model.Column{Name: "int unsigned 2", Type: mysql.TypeLong, Value: uint64(2147483648), Flag: model.UnsignedFlag}, "int unsigned", JavaSQLTypeBIGINT, "2147483648"},
+
+	{&model.Column{Name: "bigint", Type: mysql.TypeLonglong, Value: int64(9223372036854775807)}, "bigint", JavaSQLTypeBIGINT, "9223372036854775807"},
+	{&model.Column{Name: "bigint unsigned", Type: mysql.TypeLonglong, Value: uint64(9223372036854775807), Flag: model.UnsignedFlag}, "bigint unsigned", JavaSQLTypeBIGINT, "9223372036854775807"},
+	{&model.Column{Name: "bigint unsigned 2", Type: mysql.TypeLonglong, Value: uint64(9223372036854775808), Flag: model.UnsignedFlag}, "bigint unsigned", JavaSQLTypeDECIMAL, "9223372036854775808"},
+
+	{&model.Column{Name: "float", Type: mysql.TypeFloat, Value: 3.14}, "float", JavaSQLTypeREAL, "3.14"},
+	{&model.Column{Name: "double", Type: mysql.TypeDouble, Value: 2.71}, "double", JavaSQLTypeDOUBLE, "2.71"},
+	{&model.Column{Name: "decimal", Type: mysql.TypeNewDecimal, Value: "2333"}, "decimal", JavaSQLTypeDECIMAL, "2333"},
+
+	{&model.Column{Name: "float unsigned", Type: mysql.TypeFloat, Value: 3.14, Flag: model.UnsignedFlag}, "float unsigned", JavaSQLTypeREAL, "3.14"},
+	{&model.Column{Name: "double unsigned", Type: mysql.TypeDouble, Value: 2.71, Flag: model.UnsignedFlag}, "double unsigned", JavaSQLTypeDOUBLE, "2.71"},
+	{&model.Column{Name: "decimal unsigned", Type: mysql.TypeNewDecimal, Value: "2333", Flag: model.UnsignedFlag}, "decimal unsigned", JavaSQLTypeDECIMAL, "2333"},
+
+	// for columns whose value type is `[]uint8` with `BinaryFlag` set, expectedValue is a dummy placeholder.
+	{&model.Column{Name: "varchar", Type: mysql.TypeVarchar, Value: []uint8("测试Varchar")}, "varchar", JavaSQLTypeVARCHAR, "测试Varchar"},
+	{&model.Column{Name: "char", Type: mysql.TypeString, Value: []uint8("测试String")}, "char", JavaSQLTypeCHAR, "测试String"},
+	{&model.Column{Name: "binary", Type: mysql.TypeString, Value: []uint8("测试Binary"), Flag: model.BinaryFlag}, "binary", JavaSQLTypeBLOB, "测试Binary"},
+	{&model.Column{Name: "varbinary", Type: mysql.TypeVarchar, Value: []uint8("测试varbinary"), Flag: model.BinaryFlag}, "varbinary", JavaSQLTypeBLOB, "测试varbinary"},
+
+	{&model.Column{Name: "tinytext", Type: mysql.TypeTinyBlob, Value: []uint8("测试Tinytext")}, "tinytext", JavaSQLTypeCLOB, "测试Tinytext"},
+	{&model.Column{Name: "text", Type: mysql.TypeBlob, Value: []uint8("测试text")}, "text", JavaSQLTypeCLOB, "测试text"},
+	{&model.Column{Name: "mediumtext", Type: mysql.TypeMediumBlob, Value: []uint8("测试mediumtext")}, "mediumtext", JavaSQLTypeCLOB, "测试mediumtext"},
+	{&model.Column{Name: "longtext", Type: mysql.TypeLongBlob, Value: []uint8("测试longtext")}, "longtext", JavaSQLTypeCLOB, "测试longtext"},
+
+	{&model.Column{Name: "tinyblob", Type: mysql.TypeTinyBlob, Value: []uint8("测试tinyblob"), Flag: model.BinaryFlag}, "tinyblob", JavaSQLTypeBLOB, "测试tinyblob"},
+	{&model.Column{Name: "blob", Type: mysql.TypeBlob, Value: []uint8("测试blob"), Flag: model.BinaryFlag}, "blob", JavaSQLTypeBLOB, "测试blob"},
+	{&model.Column{Name: "mediumblob", Type: mysql.TypeMediumBlob, Value: []uint8("测试mediumblob"), Flag: model.BinaryFlag}, "mediumblob", JavaSQLTypeBLOB, "测试mediumblob"},
+	{&model.Column{Name: "longblob", Type: mysql.TypeLongBlob, Value: []uint8("测试longblob"), Flag: model.BinaryFlag}, "longblob", JavaSQLTypeBLOB, "测试longblob"},
+
+	{&model.Column{Name: "date", Type: mysql.TypeDate, Value: "2020-02-20"}, "date", JavaSQLTypeDATE, "2020-02-20"},
+	{&model.Column{Name: "datetime", Type: mysql.TypeDatetime, Value: "2020-02-20 02:20:20"}, "datetime", JavaSQLTypeTIMESTAMP, "2020-02-20 02:20:20"},
+	{&model.Column{Name: "timestamp", Type: mysql.TypeTimestamp, Value: "2020-02-20 10:20:20"}, "timestamp", JavaSQLTypeTIMESTAMP, "2020-02-20 10:20:20"},
+	{&model.Column{Name: "time", Type: mysql.TypeDuration, Value: "02:20:20"}, "time", JavaSQLTypeTIME, "02:20:20"},
+	{&model.Column{Name: "year", Type: mysql.TypeYear, Value: "2020", Flag: model.UnsignedFlag}, "year", JavaSQLTypeVARCHAR, "2020"},
+
+	{&model.Column{Name: "enum", Type: mysql.TypeEnum, Value: uint64(1)}, "enum", JavaSQLTypeINTEGER, "1"},
+	{&model.Column{Name: "set", Type: mysql.TypeSet, Value: uint64(3)}, "set", JavaSQLTypeBIT, "3"},
+	{&model.Column{Name: "bit", Type: mysql.TypeBit, Value: uint64(65), Flag: model.UnsignedFlag | model.BinaryFlag}, "bit", JavaSQLTypeBIT, "65"},
+	{&model.Column{Name: "json", Type: mysql.TypeJSON, Value: "{\"key1\": \"value1\"}", Flag: model.BinaryFlag}, "json", JavaSQLTypeVARCHAR, "{\"key1\": \"value1\"}"},
+}
+
+func (s *canalEntrySuite) TestGetMySQLTypeAndJavaSQLType(c *check.C) {
+	defer testleak.AfterTest(c)()
+	for _, item := range testColumnsTable {
+		obtainedMySQLType := getMySQLType(item.column)
+		c.Assert(obtainedMySQLType, check.Equals, item.expectedMySQLType)
+
+		obtainedJavaSQLType := getJavaSQLType(item.column, obtainedMySQLType)
+		c.Assert(obtainedJavaSQLType, check.Equals, item.expectedJavaSQLType)
+	}
+}
diff --git a/cdc/sink/codec/java.go b/cdc/sink/codec/java.go
index b2217b4145b..8f2169895fe 100644
--- a/cdc/sink/codec/java.go
+++ b/cdc/sink/codec/java.go
@@ -30,7 +30,6 @@ const (
 	JavaSQLTypeDECIMAL     JavaSQLType = 3
 	JavaSQLTypeCHAR        JavaSQLType = 1
 	JavaSQLTypeVARCHAR     JavaSQLType = 12
-	JavaSQLTypeLONGVARCHAR JavaSQLType = -1
 	JavaSQLTypeDATE        JavaSQLType = 91
 	JavaSQLTypeTIME        JavaSQLType = 92
 	JavaSQLTypeTIMESTAMP   JavaSQLType = 93
@@ -39,8 +38,10 @@ const (
 	JavaSQLTypeLONGVARBINARY JavaSQLType = -4
 	JavaSQLTypeNULL          JavaSQLType = 0
 	JavaSQLTypeBLOB          JavaSQLType = 2004
+	JavaSQLTypeCLOB          JavaSQLType = 2005
 
 	// unused
+	// JavaSQLTypeLONGVARCHAR JavaSQLType = -1
 	// JavaSQLTypeFLOAT JavaSQLType = 6
 	// JavaSQLTypeNUMERIC JavaSQLType = 2
 	// JavaSQLTypeOTHER JavaSQLType = 1111
@@ -48,7 +49,6 @@ const (
 	// JavaSQLTypeDISTINCT JavaSQLType = 2001
 	// JavaSQLTypeSTRUCT JavaSQLType = 2002
 	// JavaSQLTypeARRAY JavaSQLType = 2003
-	// JavaSQLTypeCLOB JavaSQLType = 2005
 	// JavaSQLTypeREF JavaSQLType = 2006
 	// JavaSQLTypeDATALINK JavaSQLType = 70
 	// JavaSQLTypeBOOLEAN JavaSQLType = 16
@@ -63,13 +63,10 @@ const (
 	// JavaSQLTypeTIMESTAMP_WITH_TIMEZONE JavaSQLType = 2014
 )
 
-// MysqlToJavaType converts the mysql protocol types to java sql types
-func MysqlToJavaType(mysqlType byte) JavaSQLType {
-	// see https://github.com/mysql/mysql-connector-j/blob/5.1.49/src/com/mysql/jdbc/MysqlDefs.java
+// mySQLType2JavaType converts the mysql protocol types to java sql types
+// see https://github.com/alibaba/canal/blob/b54bea5e3337c9597c427a53071d214ff04628d1/dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java#L132-L269
+func mySQLType2JavaType(mysqlType byte, isBinary bool) JavaSQLType {
 	switch mysqlType {
-	case mysql.TypeNewDecimal:
-		return JavaSQLTypeDECIMAL
-
 	case mysql.TypeTiny:
 		return JavaSQLTypeTINYINT
 
@@ -88,7 +85,10 @@ func mySQLType2JavaType(mysqlType byte, isBinary bool) JavaSQLType {
 	case mysql.TypeNull:
 		return JavaSQLTypeNULL
 
-	case mysql.TypeTimestamp:
+	case mysql.TypeNewDecimal:
+		return JavaSQLTypeDECIMAL
+
+	case mysql.TypeTimestamp, mysql.TypeDatetime:
 		return JavaSQLTypeTIMESTAMP
 
 	case mysql.TypeLonglong:
@@ -97,47 +97,40 @@ func mySQLType2JavaType(mysqlType byte, isBinary bool) JavaSQLType {
 	case mysql.TypeInt24:
 		return JavaSQLTypeINTEGER
 
-	case mysql.TypeDate:
+	case mysql.TypeDate, mysql.TypeNewDate:
 		return JavaSQLTypeDATE
 
 	case mysql.TypeDuration:
 		return JavaSQLTypeTIME
 
-	case mysql.TypeDatetime:
-		return JavaSQLTypeTIMESTAMP
-
 	case mysql.TypeYear:
-		return JavaSQLTypeDATE
-
-	case mysql.TypeNewDate:
-		return JavaSQLTypeDATE
+		return JavaSQLTypeVARCHAR
 
 	case mysql.TypeEnum:
-		return JavaSQLTypeCHAR
+		return JavaSQLTypeINTEGER
 
 	case mysql.TypeSet:
-		return JavaSQLTypeCHAR
+		return JavaSQLTypeBIT
 
+	// Blob handling is not identical to the official implementation, since we do not know `meta` at the moment.
+	// see https://github.com/alibaba/canal/blob/b54bea5e3337c9597c427a53071d214ff04628d1/dbsync/src/main/java/com/taobao/tddl/dbsync/binlog/event/RowsLogBuffer.java#L222-L231
+	// But this does not matter: they will eventually be `JavaSQLTypeBLOB` or `JavaSQLTypeCLOB`.
case mysql.TypeTinyBlob: return JavaSQLTypeVARBINARY - case mysql.TypeMediumBlob: - return JavaSQLTypeLONGVARBINARY - - case mysql.TypeLongBlob: - return JavaSQLTypeLONGVARBINARY - - case mysql.TypeBlob: + case mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeBlob: return JavaSQLTypeLONGVARBINARY case mysql.TypeVarString, mysql.TypeVarchar: + if isBinary { + return JavaSQLTypeVARBINARY + } return JavaSQLTypeVARCHAR - case mysql.TypeJSON: - // json: see jdbc 8.0, https://github.com/mysql/mysql-connector-j/blob/8.0.20/src/main/core-api/java/com/mysql/cj/MysqlType.java - return JavaSQLTypeLONGVARCHAR - case mysql.TypeString: + if isBinary { + return JavaSQLTypeBINARY + } return JavaSQLTypeCHAR case mysql.TypeGeometry: @@ -146,6 +139,9 @@ func MysqlToJavaType(mysqlType byte) JavaSQLType { case mysql.TypeBit: return JavaSQLTypeBIT + case mysql.TypeJSON: + return JavaSQLTypeVARCHAR + default: return JavaSQLTypeVARCHAR } diff --git a/cdc/sink/codec/json.go b/cdc/sink/codec/json.go index 84d0262bfcd..d52509f0164 100644 --- a/cdc/sink/codec/json.go +++ b/cdc/sink/codec/json.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tiflow/pkg/config" cerror "github.com/pingcap/tiflow/pkg/errors" "go.uber.org/zap" + "golang.org/x/text/encoding/charmap" ) const ( @@ -91,6 +92,37 @@ func (c *column) FromSinkColumn(col *model.Column) { } } +func (c *column) decodeCanalJSONColumn(name string, javaType JavaSQLType) *model.Column { + col := new(model.Column) + col.Type = c.Type + col.Flag = c.Flag + col.Name = name + col.Value = c.Value + if c.Value == nil { + return col + } + + value, ok := col.Value.(string) + if !ok { + log.Panic("canal-json encoded message should have type in `string`") + } + + if javaType != JavaSQLTypeBLOB { + col.Value = value + return col + } + + // When encoding `JavaSQLTypeBLOB` values, an `ISO8859_1` decoder was used; now reverse it back.
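+ // ISO 8859-1 maps each byte 0x00..0xFF to exactly one code point U+0000..U+00FF, + // so decoding and then re-encoding restores the original raw bytes losslessly.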
+ encoder := charmap.ISO8859_1.NewEncoder() + value, err := encoder.String(value) + if err != nil { + log.Panic("invalid column value, please report a bug", zap.Any("col", c), zap.Error(err)) + } + + col.Value = value + return col +} + func (c *column) ToSinkColumn(name string) *model.Column { col := new(model.Column) col.Type = c.Type diff --git a/cdc/sink/mq_test.go b/cdc/sink/mq_test.go index 537bc7a69e6..417049c6552 100644 --- a/cdc/sink/mq_test.go +++ b/cdc/sink/mq_test.go @@ -54,7 +54,7 @@ func (s mqSinkSuite) TestKafkaSink(c *check.C) { uriTemplate := "kafka://%s/%s?kafka-version=0.9.0.0&max-batch-size=1" + "&max-message-bytes=1048576&partition-num=1" + - "&kafka-client-id=unit-test&auto-create-topic=false&compression=gzip&protocol=default" + "&kafka-client-id=unit-test&auto-create-topic=false&compression=gzip&protocol=open-protocol" uri := fmt.Sprintf(uriTemplate, leader.Addr(), topic) sinkURI, err := url.Parse(uri) c.Assert(err, check.IsNil) @@ -158,7 +158,7 @@ func (s mqSinkSuite) TestKafkaSinkFilter(c *check.C) { prodSuccess := new(sarama.ProduceResponse) prodSuccess.AddTopicPartition(topic, 0, sarama.ErrNoError) - uriTemplate := "kafka://%s/%s?kafka-version=0.9.0.0&auto-create-topic=false&protocol=default" + uriTemplate := "kafka://%s/%s?kafka-version=0.9.0.0&auto-create-topic=false&protocol=open-protocol" uri := fmt.Sprintf(uriTemplate, leader.Addr(), topic) sinkURI, err := url.Parse(uri) c.Assert(err, check.IsNil) @@ -257,7 +257,7 @@ func (s mqSinkSuite) TestFlushRowChangedEvents(c *check.C) { uriTemplate := "kafka://%s/%s?kafka-version=0.9.0.0&max-batch-size=1" + "&max-message-bytes=1048576&partition-num=1" + - "&kafka-client-id=unit-test&auto-create-topic=false&compression=gzip&protocol=default" + "&kafka-client-id=unit-test&auto-create-topic=false&compression=gzip&protocol=open-protocol" uri := fmt.Sprintf(uriTemplate, leader.Addr(), topic) sinkURI, err := url.Parse(uri) c.Assert(err, check.IsNil) diff --git a/cdc/sorter/leveldb/cleaner.go b/cdc/sorter/leveldb/cleaner.go index b6a947198da..3e8eee9256a 100644 --- a/cdc/sorter/leveldb/cleaner.go +++ b/cdc/sorter/leveldb/cleaner.go @@ -32,9 +32,13 @@ import ( // CleanerActor is an actor that can clean up table data asynchronously. type CleanerActor struct { - id actor.ID - db db.DB - wbSize int + id actor.ID + db db.DB + wbSize int + + deleteCount int + compact *CompactScheduler + closedWg *sync.WaitGroup limiter *rate.Limiter @@ -45,7 +49,7 @@ var _ actor.Actor = (*CleanerActor)(nil) // NewCleanerActor returns a cleaner actor. func NewCleanerActor( - id int, db db.DB, router *actor.Router, + id int, db db.DB, router *actor.Router, compact *CompactScheduler, cfg *config.DBConfig, wg *sync.WaitGroup, ) (*CleanerActor, actor.Mailbox, error) { wg.Add(1) @@ -61,6 +65,7 @@ func NewCleanerActor( id: actor.ID(id), db: db, wbSize: wbSize, + compact: compact, closedWg: wg, limiter: limiter, router: router, @@ -193,11 +198,17 @@ func (clean *CleanerActor) writeRateLimited( } } } + clean.deleteCount += int(batch.Count()) err := batch.Commit() if err != nil { return 0, errors.Trace(err) } batch.Reset() + // Schedule a compact task when there are too many deletions. + if clean.compact.maybeCompact(clean.id, clean.deleteCount) { + // Reset the delete key count if the compaction is scheduled successfully.
+ clean.deleteCount = 0 + } return 0, nil } diff --git a/cdc/sorter/leveldb/cleaner_test.go b/cdc/sorter/leveldb/cleaner_test.go index b86c6b10a23..5d638ce34a6 100644 --- a/cdc/sorter/leveldb/cleaner_test.go +++ b/cdc/sorter/leveldb/cleaner_test.go @@ -67,7 +67,8 @@ func TestCleanerPoll(t *testing.T) { db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) require.Nil(t, err) closedWg := new(sync.WaitGroup) - clean, _, err := NewCleanerActor(1, db, nil, cfg, closedWg) + compact := NewCompactScheduler(actor.NewRouter(t.Name()), cfg) + clean, _, err := NewCleanerActor(1, db, nil, compact, cfg, closedWg) require.Nil(t, err) // Put data to db. @@ -163,12 +164,13 @@ func TestCleanerContextCancel(t *testing.T) { db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) require.Nil(t, err) closedWg := new(sync.WaitGroup) - ldb, _, err := NewCleanerActor(0, db, nil, cfg, closedWg) + compact := NewCompactScheduler(actor.NewRouter(t.Name()), cfg) + clean, _, err := NewCleanerActor(1, db, nil, compact, cfg, closedWg) require.Nil(t, err) cancel() tasks := makeCleanTask(1, 1) - closed := !ldb.Poll(ctx, tasks) + closed := !clean.Poll(ctx, tasks) require.True(t, closed) closedWg.Wait() require.Nil(t, db.Close()) @@ -185,7 +187,8 @@ func TestCleanerWriteRateLimited(t *testing.T) { db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) require.Nil(t, err) closedWg := new(sync.WaitGroup) - clean, _, err := NewCleanerActor(1, db, nil, cfg, closedWg) + compact := NewCompactScheduler(actor.NewRouter(t.Name()), cfg) + clean, _, err := NewCleanerActor(1, db, nil, compact, cfg, closedWg) require.Nil(t, err) // Put data to db. @@ -262,8 +265,9 @@ func TestCleanerTaskRescheduled(t *testing.T) { db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) require.Nil(t, err) closedWg := new(sync.WaitGroup) - router := actor.NewRouter("test") - clean, mb, err := NewCleanerActor(1, db, router, cfg, closedWg) + router := actor.NewRouter(t.Name()) + compact := NewCompactScheduler(actor.NewRouter(t.Name()), cfg) + clean, mb, err := NewCleanerActor(1, db, router, compact, cfg, closedWg) require.Nil(t, err) router.InsertMailbox4Test(actor.ID(1), mb) require.Nil(t, router.SendB(ctx, actor.ID(1), actormsg.TickMessage())) @@ -354,3 +358,61 @@ func TestCleanerTaskRescheduled(t *testing.T) { // require.Zero(t, stats.AliveIterators) require.Nil(t, db.Close()) } + +func TestCleanerCompact(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.GetDefaultServerConfig().Clone().Debug.DB + cfg.Count = 1 + + id := 1 + db, err := db.OpenLevelDB(ctx, id, t.TempDir(), cfg) + require.Nil(t, err) + closedWg := new(sync.WaitGroup) + compactRouter := actor.NewRouter(t.Name()) + compactMB := actor.NewMailbox(actor.ID(id), 1) + compactRouter.InsertMailbox4Test(compactMB.ID(), compactMB) + compact := NewCompactScheduler(compactRouter, cfg) + cleaner, _, err := NewCleanerActor(id, db, nil, compact, cfg, closedWg) + require.Nil(t, err) + + // Lower compactThreshold to speed up tests. + compact.compactThreshold = 2 + cleaner.wbSize = 1 + + // Put data to db. + // * 1 key of uid1 table1 + // * 2 key of uid2 table1 + data := [][]int{ + {1, 1, 1}, + {2, 2, 1}, + } + prepareData(t, db, data) + + // Empty task must not trigger compact. + closed := !cleaner.Poll(ctx, makeCleanTask(0, 0)) + require.False(t, closed) + _, ok := compactMB.Receive() + require.False(t, ok) + + // Delete 2 keys must trigger compact. 
+ closed = !cleaner.Poll(ctx, makeCleanTask(2, 1)) + require.False(t, closed) + _, ok = compactMB.Receive() + require.True(t, ok) + + // Delete 1 key must not trigger compact. + closed = !cleaner.Poll(ctx, makeCleanTask(1, 1)) + require.False(t, closed) + _, ok = compactMB.Receive() + require.False(t, ok) + + // Close db. + closed = !cleaner.Poll(ctx, []actormsg.Message{actormsg.StopMessage()}) + require.True(t, closed) + closedWg.Wait() + require.Nil(t, db.Close()) +} diff --git a/cdc/sorter/leveldb/compactor.go b/cdc/sorter/leveldb/compactor.go new file mode 100644 index 00000000000..cc211572817 --- /dev/null +++ b/cdc/sorter/leveldb/compactor.go @@ -0,0 +1,132 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package leveldb + +import ( + "bytes" + "context" + "strconv" + "sync" + "time" + + "github.com/pingcap/log" + "github.com/pingcap/tiflow/pkg/actor" + actormsg "github.com/pingcap/tiflow/pkg/actor/message" + "github.com/pingcap/tiflow/pkg/config" + "github.com/pingcap/tiflow/pkg/db" + cerrors "github.com/pingcap/tiflow/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" +) + +// CompactActor is an actor that compacts the db. +// It GCs deleted kv entries and reclaims disk space. +type CompactActor struct { + id actor.ID + db db.DB + closedWg *sync.WaitGroup + + metricCompactDuration prometheus.Observer +} + +var _ actor.Actor = (*CompactActor)(nil) + +// NewCompactActor returns a compactor actor. +func NewCompactActor( + id int, db db.DB, wg *sync.WaitGroup, captureAddr string, +) (*CompactActor, actor.Mailbox, error) { + wg.Add(1) + idTag := strconv.Itoa(id) + // Compact is CPU-intensive, set capacity to 1 to reduce unnecessary tasks. + mb := actor.NewMailbox(actor.ID(id), 1) + return &CompactActor{ + id: actor.ID(id), + db: db, + closedWg: wg, + + metricCompactDuration: sorterCompactDurationHistogram.WithLabelValues(captureAddr, idTag), + }, mb, nil +} + +// Poll implements actor.Actor. +func (c *CompactActor) Poll(ctx context.Context, tasks []actormsg.Message) bool { + select { + case <-ctx.Done(): + c.close(ctx.Err()) + return false + default: + } + + // Only compact once for every batch. + for pos := range tasks { + msg := tasks[pos] + switch msg.Tp { + case actormsg.TypeTick: + case actormsg.TypeStop: + c.close(nil) + return false + default: + log.Panic("unexpected message", zap.Any("message", msg)) + } + } + + // A range that is large enough to cover the entire db effectively. + // See sorter/encoding/key.go. + start, end := []byte{0x0}, bytes.Repeat([]byte{0xff}, 128) + now := time.Now() + if err := c.db.Compact(start, end); err != nil { + log.Error("db compact error", zap.Error(err)) + } + c.metricCompactDuration.Observe(time.Since(now).Seconds()) + + return true +} + +func (c *CompactActor) close(err error) { + log.Info("compactor actor quit", + zap.Uint64("ID", uint64(c.id)), zap.Error(err)) + c.closedWg.Done() +} + +// NewCompactScheduler returns a new compact scheduler.
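+// A single scheduler is shared by the db actor and the cleaner actor of the same db instance (see system.go below); together with the compactor's capacity-1 mailbox this keeps duplicate compaction requests from piling up.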
+func NewCompactScheduler( + router *actor.Router, cfg *config.DBConfig, +) *CompactScheduler { + return &CompactScheduler{ + router: router, + compactThreshold: cfg.CompactionDeletionThreshold, + } +} + +// CompactScheduler schedules compact tasks to compactors. +type CompactScheduler struct { + // A router to compactors. + router *actor.Router + // The number of deleted keys that triggers a compaction. + compactThreshold int +} + +func (s *CompactScheduler) maybeCompact(id actor.ID, deleteCount int) bool { + if deleteCount < s.compactThreshold { + return false + } + err := s.router.Send(id, actormsg.TickMessage()) + // An ongoing compaction may block the compactor and cause the channel to be full; + // skip sending the task as there is already a pending one. + if err != nil && cerrors.ErrMailboxFull.NotEqual(err) { + log.Warn("schedule compact failed", zap.Error(err)) + return false + } + return true +} diff --git a/cdc/sorter/leveldb/compactor_test.go b/cdc/sorter/leveldb/compactor_test.go new file mode 100644 index 00000000000..90a755f5979 --- /dev/null +++ b/cdc/sorter/leveldb/compactor_test.go @@ -0,0 +1,113 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package leveldb + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/pingcap/tiflow/pkg/actor" + actormsg "github.com/pingcap/tiflow/pkg/actor/message" + "github.com/pingcap/tiflow/pkg/config" + "github.com/pingcap/tiflow/pkg/db" + "github.com/stretchr/testify/require" +) + +type mockCompactDB struct { + db.DB + compact chan struct{} +} + +func (m *mockCompactDB) Compact(start, end []byte) error { + m.compact <- struct{}{} + return nil +} + +func TestCompactorPoll(t *testing.T) { + t.Parallel() + ctx := context.Background() + cfg := config.GetDefaultServerConfig().Clone().Debug.DB + cfg.Count = 1 + + db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) + require.Nil(t, err) + mockDB := mockCompactDB{DB: db, compact: make(chan struct{}, 1)} + closedWg := new(sync.WaitGroup) + compactor, _, err := NewCompactActor(1, &mockDB, closedWg, "") + require.Nil(t, err) + + closed := !compactor.Poll(ctx, []actormsg.Message{actormsg.TickMessage()}) + require.False(t, closed) + select { + case <-time.After(5 * time.Second): + t.Fatal("Must trigger compact") + case <-mockDB.compact: + } + + // Close leveldb.
+ closed = !compactor.Poll(ctx, []actormsg.Message{actormsg.StopMessage()}) + require.True(t, closed) + closedWg.Wait() + require.Nil(t, db.Close()) +} + +func TestCompactorContextCancel(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + cfg := config.GetDefaultServerConfig().Clone().Debug.DB + cfg.Count = 1 + + db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) + require.Nil(t, err) + closedWg := new(sync.WaitGroup) + ldb, _, err := NewCompactActor(0, db, closedWg, "") + require.Nil(t, err) + + cancel() + closed := !ldb.Poll(ctx, []actormsg.Message{actormsg.TickMessage()}) + require.True(t, closed) + closedWg.Wait() + require.Nil(t, db.Close()) +} + +func TestScheduleCompact(t *testing.T) { + t.Parallel() + router := actor.NewRouter(t.Name()) + mb := actor.NewMailbox(actor.ID(1), 1) + router.InsertMailbox4Test(mb.ID(), mb) + compact := NewCompactScheduler( + router, config.GetDefaultServerConfig().Debug.DB) + compact.compactThreshold = 2 + + // Too few deletions, should not trigger compact. + require.False(t, compact.maybeCompact(mb.ID(), 1)) + _, ok := mb.Receive() + require.False(t, ok) + // Must trigger compact. + require.True(t, compact.maybeCompact(mb.ID(), 3)) + msg, ok := mb.Receive() + require.True(t, ok) + require.EqualValues(t, actormsg.TickMessage(), msg) + + // Skip sending unnecessary tasks. + require.True(t, compact.maybeCompact(mb.ID(), 3)) + require.True(t, compact.maybeCompact(mb.ID(), 3)) + msg, ok = mb.Receive() + require.True(t, ok) + require.EqualValues(t, actormsg.TickMessage(), msg) + _, ok = mb.Receive() + require.False(t, ok) +} diff --git a/cdc/sorter/leveldb/leveldb.go b/cdc/sorter/leveldb/leveldb.go index dcc7091749a..c8ca1a1397e 100644 --- a/cdc/sorter/leveldb/leveldb.go +++ b/cdc/sorter/leveldb/leveldb.go @@ -34,12 +34,16 @@ import ( // DBActor is a db actor, it reads, writes and deletes key value pair in its db. type DBActor struct { - id actor.ID - db db.DB - wb db.Batch - wbSize int - wbCap int - iterSema *semaphore.Weighted + id actor.ID + db db.DB + wb db.Batch + wbSize int + wbCap int + snapSem *semaphore.Weighted + + deleteCount int + compact *CompactScheduler + closedWg *sync.WaitGroup metricWriteDuration prometheus.Observer @@ -50,7 +54,7 @@ var _ actor.Actor = (*DBActor)(nil) // NewDBActor returns a db actor. func NewDBActor( - ctx context.Context, id int, db db.DB, cfg *config.DBConfig, + id int, db db.DB, cfg *config.DBConfig, compact *CompactScheduler, wg *sync.WaitGroup, captureAddr string, ) (*DBActor, actor.Mailbox, error) { idTag := strconv.Itoa(id) @@ -66,18 +70,22 @@ func NewDBActor( iterSema := semaphore.NewWeighted(int64(cfg.Concurrency)) mb := actor.NewMailbox(actor.ID(id), cfg.Concurrency) wg.Add(1) - return &DBActor{ - id: actor.ID(id), - db: db, - wb: wb, - iterSema: iterSema, - wbSize: wbSize, - wbCap: wbCap, + + dba := &DBActor{ + id: actor.ID(id), + db: db, + wb: wb, + snapSem: iterSema, + wbSize: wbSize, + wbCap: wbCap, + compact: compact, + closedWg: wg, metricWriteDuration: sorterWriteDurationHistogram.WithLabelValues(captureAddr, idTag), metricWriteBytes: sorterWriteBytesHistogram.WithLabelValues(captureAddr, idTag), - }, mb, nil + } + return dba, mb, nil } func (ldb *DBActor) close(err error) { @@ -102,6 +110,12 @@ func (ldb *DBActor) maybeWrite(force bool) error { } else { ldb.wb = ldb.db.Batch(ldb.wbCap) } + + // Schedule a compact task when there are too many deletions.
+ if ldb.compact.maybeCompact(ldb.id, ldb.deleteCount) { + // Reset the delete key count if the compaction is scheduled successfully. + ldb.deleteCount = 0 + } } return nil } @@ -136,6 +150,7 @@ func (ldb *DBActor) Poll(ctx context.Context, tasks []actormsg.Message) bool { } else { // Delete the key if value is empty ldb.wb.Delete([]byte(k)) + ldb.deleteCount++ } // Do not force write, batching for efficiency. @@ -154,13 +169,14 @@ func (ldb *DBActor) Poll(ctx context.Context, tasks []actormsg.Message) bool { // Force write only if there is a task requires an iterator. forceWrite := len(snapChs) != 0 - if err := ldb.maybeWrite(forceWrite); err != nil { + err := ldb.maybeWrite(forceWrite) + if err != nil { log.Panic("db error", zap.Error(err)) } // Batch acquire iterators. for i := range snapChs { snapCh := snapChs[i] - err := ldb.iterSema.Acquire(ctx, 1) + err := ldb.snapSem.Acquire(ctx, 1) if err != nil { if errors.Cause(err) == context.Canceled || errors.Cause(err) == context.DeadlineExceeded { @@ -174,7 +190,7 @@ func (ldb *DBActor) Poll(ctx context.Context, tasks []actormsg.Message) bool { } snapCh <- message.LimitedSnapshot{ Snapshot: snap, - Sema: ldb.iterSema, + Sema: ldb.snapSem, } close(snapCh) } diff --git a/cdc/sorter/leveldb/leveldb_test.go b/cdc/sorter/leveldb/leveldb_test.go index f606ff8ac1f..53239d94ac2 100644 --- a/cdc/sorter/leveldb/leveldb_test.go +++ b/cdc/sorter/leveldb/leveldb_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/pingcap/tiflow/cdc/sorter/leveldb/message" + "github.com/pingcap/tiflow/pkg/actor" actormsg "github.com/pingcap/tiflow/pkg/actor/message" "github.com/pingcap/tiflow/pkg/config" "github.com/pingcap/tiflow/pkg/db" @@ -37,6 +38,8 @@ func TestMain(m *testing.M) { } func TestMaybeWrite(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -46,25 +49,30 @@ func TestMaybeWrite(t *testing.T) { db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) require.Nil(t, err) closedWg := new(sync.WaitGroup) - ldb, _, err := NewDBActor(ctx, 0, db, cfg, closedWg, "") + compact := NewCompactScheduler(actor.NewRouter(t.Name()), cfg) + ldb, _, err := NewDBActor(0, db, cfg, compact, closedWg, "") require.Nil(t, err) // Empty batch - require.Nil(t, ldb.maybeWrite(false)) + err = ldb.maybeWrite(false) + require.Nil(t, err) // None empty batch ldb.wb.Put([]byte("abc"), []byte("abc")) - require.Nil(t, ldb.maybeWrite(false)) + err = ldb.maybeWrite(false) + require.Nil(t, err) require.EqualValues(t, ldb.wb.Count(), 1) // None empty batch - require.Nil(t, ldb.maybeWrite(true)) + err = ldb.maybeWrite(true) + require.Nil(t, err) require.EqualValues(t, ldb.wb.Count(), 0) ldb.wb.Put([]byte("abc"), []byte("abc")) ldb.wbSize = 1 require.Greater(t, len(ldb.wb.Repr()), ldb.wbSize) - require.Nil(t, ldb.maybeWrite(false)) + err = ldb.maybeWrite(false) + require.Nil(t, err) require.EqualValues(t, ldb.wb.Count(), 0) // Close db.
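Both call sites above, `DBActor.maybeWrite` and `CleanerActor.writeRateLimited`, follow the same debounce pattern around `maybeCompact`. A condensed sketch of that pattern follows; the type and method names here are illustrative, not part of the PR:

```go
// Sketch of the deletion-counting trigger shared by DBActor and CleanerActor.
// compactThreshold mirrors config.DBConfig.CompactionDeletionThreshold.
type compactionTrigger struct {
	deleteCount      int
	compactThreshold int
	// schedule asks the compactor to run; it reports true when the task was
	// queued, or when an equivalent task is already pending (mailbox full).
	schedule func() bool
}

func (t *compactionTrigger) onCommit(deleted int) {
	t.deleteCount += deleted
	if t.deleteCount < t.compactThreshold {
		return
	}
	if t.schedule() {
		// Reset only on success so a failed send is retried after the
		// next committed batch.
		t.deleteCount = 0
	}
}
```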
@@ -74,6 +82,62 @@ func TestMaybeWrite(t *testing.T) { require.Nil(t, db.Close()) } +func TestCompact(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.GetDefaultServerConfig().Clone().Debug.DB + cfg.Count = 1 + + id := 1 + db, err := db.OpenLevelDB(ctx, id, t.TempDir(), cfg) + require.Nil(t, err) + closedWg := new(sync.WaitGroup) + compactRouter := actor.NewRouter(t.Name()) + compactMB := actor.NewMailbox(actor.ID(id), 1) + compactRouter.InsertMailbox4Test(compactMB.ID(), compactMB) + compact := NewCompactScheduler(compactRouter, cfg) + ldb, _, err := NewDBActor(id, db, cfg, compact, closedWg, "") + require.Nil(t, err) + + // Lower compactThreshold to speed up tests. + compact.compactThreshold = 2 + + // Empty task must not trigger compact. + task, snapCh := makeTask(make(map[message.Key][]byte), true) + closed := !ldb.Poll(ctx, task) + require.False(t, closed) + <-snapCh + _, ok := compactMB.Receive() + require.False(t, ok) + + // Delete 3 keys must trigger compact. + dels := map[message.Key][]byte{"a": {}, "b": {}, "c": {}} + task, snapCh = makeTask(dels, true) + closed = !ldb.Poll(ctx, task) + require.False(t, closed) + <-snapCh + _, ok = compactMB.Receive() + require.True(t, ok) + + // Delete 1 key must not trigger compact. + dels = map[message.Key][]byte{"a": {}} + task, snapCh = makeTask(dels, true) + closed = !ldb.Poll(ctx, task) + require.False(t, closed) + <-snapCh + _, ok = compactMB.Receive() + require.False(t, ok) + + // Close db. + closed = !ldb.Poll(ctx, []actormsg.Message{actormsg.StopMessage()}) + require.True(t, closed) + closedWg.Wait() + require.Nil(t, db.Close()) +} + func makeTask(events map[message.Key][]byte, needSnap bool) ([]actormsg.Message, chan message.LimitedSnapshot) { snapCh := make(chan message.LimitedSnapshot, 1) return []actormsg.Message{actormsg.SorterMessage(message.Task{ @@ -84,6 +148,8 @@ func makeTask(events map[message.Key][]byte, needSnap bool) ([]actormsg.Message, } func TestPutReadDelete(t *testing.T) { + t.Parallel() + ctx := context.Background() cfg := config.GetDefaultServerConfig().Clone().Debug.DB cfg.Count = 1 @@ -91,7 +157,8 @@ func TestPutReadDelete(t *testing.T) { db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) require.Nil(t, err) closedWg := new(sync.WaitGroup) - ldb, _, err := NewDBActor(ctx, 0, db, cfg, closedWg, "") + compact := NewCompactScheduler(actor.NewRouter(t.Name()), cfg) + ldb, _, err := NewDBActor(0, db, cfg, compact, closedWg, "") require.Nil(t, err) // Put only. 
@@ -187,6 +254,8 @@ func (x sortableKeys) Less(i, j int) bool { return bytes.Compare([]byte(x[i]), [ func (x sortableKeys) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func TestModelChecking(t *testing.T) { + t.Parallel() + seed := time.Now().Unix() rd := rand.New(rand.NewSource(seed)) ctx := context.Background() @@ -196,7 +265,8 @@ func TestModelChecking(t *testing.T) { db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) require.Nil(t, err) closedWg := new(sync.WaitGroup) - ldb, _, err := NewDBActor(ctx, 0, db, cfg, closedWg, "") + compact := NewCompactScheduler(actor.NewRouter(t.Name()), cfg) + ldb, _, err := NewDBActor(0, db, cfg, compact, closedWg, "") require.Nil(t, err) minKey := message.Key("") @@ -275,6 +345,8 @@ func TestModelChecking(t *testing.T) { } func TestContextCancel(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) cfg := config.GetDefaultServerConfig().Clone().Debug.DB cfg.Count = 1 @@ -282,7 +354,8 @@ func TestContextCancel(t *testing.T) { db, err := db.OpenLevelDB(ctx, 1, t.TempDir(), cfg) require.Nil(t, err) closedWg := new(sync.WaitGroup) - ldb, _, err := NewDBActor(ctx, 0, db, cfg, closedWg, "") + compact := NewCompactScheduler(actor.NewRouter(t.Name()), cfg) + ldb, _, err := NewDBActor(0, db, cfg, compact, closedWg, "") require.Nil(t, err) cancel() diff --git a/cdc/sorter/leveldb/metrics.go b/cdc/sorter/leveldb/metrics.go index 92e60480e7c..eb37052741b 100644 --- a/cdc/sorter/leveldb/metrics.go +++ b/cdc/sorter/leveldb/metrics.go @@ -34,6 +34,14 @@ var ( Buckets: prometheus.ExponentialBuckets(0.004, 2.0, 20), }, []string{"capture", "id"}) + sorterCompactDurationHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "ticdc", + Subsystem: "sorter", + Name: "db_compact_duration_seconds", + Help: "Bucketed histogram of sorter manual compact duration", + Buckets: prometheus.ExponentialBuckets(0.004, 2.0, 20), + }, []string{"capture", "id"}) + sorterCleanupKVCounter = prometheus.NewCounterVec(prometheus.CounterOpts{ Namespace: "ticdc", Subsystem: "sorter", @@ -45,6 +53,7 @@ var ( // InitMetrics registers all metrics in this file func InitMetrics(registry *prometheus.Registry) { registry.MustRegister(sorterWriteDurationHistogram) + registry.MustRegister(sorterCompactDurationHistogram) registry.MustRegister(sorterWriteBytesHistogram) registry.MustRegister(sorterCleanupKVCounter) } diff --git a/cdc/sorter/leveldb/system/system.go b/cdc/sorter/leveldb/system/system.go index 924656748f6..580d671ee65 100644 --- a/cdc/sorter/leveldb/system/system.go +++ b/cdc/sorter/leveldb/system/system.go @@ -45,15 +45,17 @@ const ( // System manages db sorter resource. type System struct { - dbs []db.DB - dbSystem *actor.System - dbRouter *actor.Router - cleanSystem *actor.System - cleanRouter *actor.Router - dir string - cfg *config.DBConfig - closedCh chan struct{} - closedWg *sync.WaitGroup + dbs []db.DB + dbSystem *actor.System + dbRouter *actor.Router + cleanSystem *actor.System + cleanRouter *actor.Router + compactSystem *actor.System + compactRouter *actor.Router + dir string + cfg *config.DBConfig + closedCh chan struct{} + closedWg *sync.WaitGroup state sysState stateMu *sync.Mutex @@ -65,17 +67,21 @@ func NewSystem(dir string, cfg *config.DBConfig) *System { WorkerNumber(cfg.Count).Build() cleanSystem, cleanRouter := actor.NewSystemBuilder("cleaner"). WorkerNumber(cfg.Count).Build() + compactSystem, compactRouter := actor.NewSystemBuilder("compactor"). 
+ WorkerNumber(cfg.Count).Build() return &System{ - dbSystem: dbSystem, - dbRouter: dbRouter, - cleanSystem: cleanSystem, - cleanRouter: cleanRouter, - dir: dir, - cfg: cfg, - closedCh: make(chan struct{}), - closedWg: new(sync.WaitGroup), - state: sysStateInit, - stateMu: new(sync.Mutex), + dbSystem: dbSystem, + dbRouter: dbRouter, + cleanSystem: cleanSystem, + cleanRouter: cleanRouter, + compactSystem: compactSystem, + compactRouter: compactRouter, + dir: dir, + cfg: cfg, + closedCh: make(chan struct{}), + closedWg: new(sync.WaitGroup), + state: sysStateInit, + stateMu: new(sync.Mutex), } } @@ -123,6 +129,7 @@ func (s *System) Start(ctx context.Context) error { } s.state = sysStateStarted + s.compactSystem.Start(ctx) s.dbSystem.Start(ctx) s.cleanSystem.Start(ctx) captureAddr := config.GetGlobalServerConfig().AdvertiseAddr @@ -134,9 +141,20 @@ func (s *System) Start(ctx context.Context) error { return errors.Trace(err) } s.dbs = append(s.dbs, db) + // Create and spawn compactor actor. + compactor, cmb, err := + lsorter.NewCompactActor(id, db, s.closedWg, captureAddr) + if err != nil { + return errors.Trace(err) + } + err = s.compactSystem.Spawn(cmb, compactor) + if err != nil { + return errors.Trace(err) + } + compact := lsorter.NewCompactScheduler(s.compactRouter, s.cfg) // Create and spawn db actor. - dbac, dbmb, err := lsorter.NewDBActor( - ctx, id, db, s.cfg, s.closedWg, captureAddr) + dbac, dbmb, err := + lsorter.NewDBActor(id, db, s.cfg, compact, s.closedWg, captureAddr) if err != nil { return errors.Trace(err) } @@ -146,7 +164,7 @@ func (s *System) Start(ctx context.Context) error { } // Create and spawn cleaner actor. clac, clmb, err := lsorter.NewCleanerActor( - id, db, s.cleanRouter, s.cfg, s.closedWg) + id, db, s.cleanRouter, compact, s.cfg, s.closedWg) if err != nil { return errors.Trace(err) } @@ -196,6 +214,7 @@ func (s *System) Stop() error { // Close actors s.broadcast(ctx, s.dbRouter, message.StopMessage()) s.broadcast(ctx, s.cleanRouter, message.StopMessage()) + s.broadcast(ctx, s.compactRouter, message.StopMessage()) // Close metrics goroutine. close(s.closedCh) // Wait actors and metrics goroutine. @@ -210,6 +229,10 @@ func (s *System) Stop() error { if err != nil { return errors.Trace(err) } + err = s.compactSystem.Stop() + if err != nil { + return errors.Trace(err) + } // Close dbs. for _, db := range s.dbs { diff --git a/cdc/sorter/leveldb/table_sorter.go b/cdc/sorter/leveldb/table_sorter.go index 68deefbdd93..332d7d29e15 100644 --- a/cdc/sorter/leveldb/table_sorter.go +++ b/cdc/sorter/leveldb/table_sorter.go @@ -33,7 +33,7 @@ import ( ) const ( - // Capacity of leveldb sorter input and output channels. + // Capacity of db sorter input and output channels. sorterInputCap, sorterOutputCap = 64, 64 // Max size of received event batch. batchReceiveEventSize = 32 @@ -65,8 +65,8 @@ type Sorter struct { metricTotalEventsResolvedTs prometheus.Counter } -// NewLevelDBSorter creates a new LevelDBSorter -func NewLevelDBSorter( +// NewSorter creates a new Sorter +func NewSorter( ctx context.Context, tableID int64, startTs uint64, router *actor.Router, actorID actor.ID, ) *Sorter { @@ -138,9 +138,14 @@ func (ls *Sorter) wait( inputCount, kvEventCount, resolvedEventCount := 0, 0, 0 appendInputEvent := func(ev *model.PolymorphicEvent) { if ls.lastSentResolvedTs != 0 && ev.CRTs < ls.lastSentResolvedTs { - log.Panic("commit ts < resolved ts", + // Since TiKV/Puller may send out of order or duplicated events, + // we should not panic here. 
+ // Regression is not a common case, use warn level to raise our + // attention. + log.Warn("commit ts < resolved ts", zap.Uint64("lastSentResolvedTs", ls.lastSentResolvedTs), zap.Any("event", ev), zap.Uint64("regionID", ev.RegionID())) + return } if ev.RawKV.OpType == model.OpTypeResolved { if maxResolvedTs < ev.CRTs { @@ -527,7 +532,7 @@ func (ls *Sorter) poll(ctx context.Context, state *pollState) error { return nil } -// Run runs LevelDBSorter +// Run runs Sorter func (ls *Sorter) Run(ctx context.Context) error { state := &pollState{ eventsBuf: make([]*model.PolymorphicEvent, batchReceiveEventSize), diff --git a/cdc/sorter/leveldb/table_sorter_test.go b/cdc/sorter/leveldb/table_sorter_test.go index 98724a822ce..ef4fab6f157 100644 --- a/cdc/sorter/leveldb/table_sorter_test.go +++ b/cdc/sorter/leveldb/table_sorter_test.go @@ -31,17 +31,43 @@ import ( "golang.org/x/sync/semaphore" ) -func newTestLeveldbSorter( +func newTestSorter( ctx context.Context, capacity int, ) (*Sorter, actor.Mailbox) { id := actor.ID(1) router := actor.NewRouter("teet") mb := actor.NewMailbox(1, capacity) router.InsertMailbox4Test(id, mb) - ls := NewLevelDBSorter(ctx, 1, 1, router, id) + ls := NewSorter(ctx, 1, 1, router, id) return ls, mb } +func TestInputOutOfOrder(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Poll twice. + capacity := 2 + require.Greater(t, batchReceiveEventSize, capacity) + ls, _ := newTestSorter(ctx, capacity) + + ls.AddEntry(ctx, model.NewResolvedPolymorphicEvent(0, 2)) + ls.AddEntry(ctx, model.NewResolvedPolymorphicEvent(0, 3)) + require.Nil(t, ls.poll(ctx, &pollState{ + eventsBuf: make([]*model.PolymorphicEvent, 1), + outputBuf: newOutputBuffer(1), + })) + require.EqualValues(t, model.NewResolvedPolymorphicEvent(0, 3), <-ls.Output()) + + ls.AddEntry(ctx, model.NewResolvedPolymorphicEvent(0, 2)) + require.Nil(t, ls.poll(ctx, &pollState{ + eventsBuf: make([]*model.PolymorphicEvent, 1), + outputBuf: newOutputBuffer(1), + })) +} + func TestWaitInput(t *testing.T) { t.Parallel() // Make sure input capacity is larger than batch size in order to test @@ -53,7 +79,7 @@ capacity := 8 require.Greater(t, batchReceiveEventSize, capacity) - ls, _ := newTestLeveldbSorter(ctx, capacity) + ls, _ := newTestSorter(ctx, capacity) // Nonbuffered channel is unavailable during the test.
ls.outputCh = make(chan *model.PolymorphicEvent) @@ -158,7 +184,7 @@ func TestWaitOutput(t *testing.T) { capacity := 4 require.Greater(t, batchReceiveEventSize, capacity) - ls, _ := newTestLeveldbSorter(ctx, capacity) + ls, _ := newTestSorter(ctx, capacity) eventsBuf := make([]*model.PolymorphicEvent, batchReceiveEventSize) @@ -191,7 +217,7 @@ func TestAsyncWrite(t *testing.T) { capacity := 4 require.Greater(t, batchReceiveEventSize, capacity) - ls, mb := newTestLeveldbSorter(ctx, capacity) + ls, mb := newTestSorter(ctx, capacity) cases := []struct { events []*model.PolymorphicEvent @@ -299,7 +325,7 @@ func TestOutput(t *testing.T) { defer cancel() capacity := 4 - ls, _ := newTestLeveldbSorter(ctx, capacity) + ls, _ := newTestSorter(ctx, capacity) ls.outputCh = make(chan *model.PolymorphicEvent, 1) ok := ls.output(&model.PolymorphicEvent{CRTs: 1}) @@ -326,7 +352,7 @@ func TestOutputBufferedResolvedEvents(t *testing.T) { defer cancel() capacity := 4 - ls, _ := newTestLeveldbSorter(ctx, capacity) + ls, _ := newTestSorter(ctx, capacity) buf := newOutputBuffer(capacity) @@ -534,7 +560,7 @@ func TestOutputIterEvents(t *testing.T) { defer cancel() capacity := 4 - ls, _ := newTestLeveldbSorter(ctx, capacity) + ls, _ := newTestSorter(ctx, capacity) // Prepare data, 3 txns, 3 events for each. // CRTs 2, StartTs 1, keys (0|1|2) @@ -702,7 +728,7 @@ func TestPoll(t *testing.T) { defer cancel() capacity := 4 - ls, mb := newTestLeveldbSorter(ctx, capacity) + ls, mb := newTestSorter(ctx, capacity) // Prepare data, 3 txns, 3 events for each. // CRTs 2, StartTs 1, keys (0|1|2) @@ -941,7 +967,7 @@ func TestTryAddEntry(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() capacity := 1 - ls, _ := newTestLeveldbSorter(ctx, capacity) + ls, _ := newTestSorter(ctx, capacity) resolvedTs1 := model.NewResolvedPolymorphicEvent(0, 1) sent, err := ls.TryAddEntry(ctx, resolvedTs1) diff --git a/cdc/sorter/sorter.go b/cdc/sorter/sorter.go index c5f3dc7a8bb..9af631be776 100644 --- a/cdc/sorter/sorter.go +++ b/cdc/sorter/sorter.go @@ -23,6 +23,7 @@ import ( // sorted PolymorphicEvents in Output channel type EventSorter interface { Run(ctx context.Context) error + // TODO add constraints to entries, e.g., order and duplication guarantees. AddEntry(ctx context.Context, entry *model.PolymorphicEvent) // TryAddEntry tries to add an entry to the sorter. // Returns false if the entry can not be added; otherwise it returns true diff --git a/dm/dm/master/openapi.go b/dm/dm/master/openapi.go index dd6ba717553..610265e0ea3 100644 --- a/dm/dm/master/openapi.go +++ b/dm/dm/master/openapi.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tiflow/dm/dm/pb" "github.com/pingcap/tiflow/dm/openapi" "github.com/pingcap/tiflow/dm/pkg/conn" + "github.com/pingcap/tiflow/dm/pkg/ha" "github.com/pingcap/tiflow/dm/pkg/log" "github.com/pingcap/tiflow/dm/pkg/terror" "github.com/pingcap/tiflow/dm/pkg/utils" @@ -822,6 +823,123 @@ func (s *Server) DMAPIOperateTableStructure(c *gin.Context, taskName string, sou } } +// DMAPIImportTaskConfig creates task_config_template. url is: (POST /api/v1/task/configs/import).
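+// It snapshots every subtask config currently known to the scheduler and stores each one as a task config template in etcd; existing templates are only overwritten when the request sets `overwrite`.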
+func (s *Server) DMAPIImportTaskConfig(c *gin.Context) { + var req openapi.TaskConfigRequest + if err := c.Bind(&req); err != nil { + _ = c.Error(err) + return + } + resp := openapi.TaskConfigResponse{ + FailedTaskList: []struct { + ErrorMsg string `json:"error_msg"` + TaskName string `json:"task_name"` + }{}, + SuccessTaskList: []string{}, + } + for _, task := range config.SubTaskConfigsToOpenAPITask(s.scheduler.GetSubTaskCfgs()) { + if err := ha.PutOpenAPITaskConfig(s.etcdClient, task, req.Overwrite); err != nil { + resp.FailedTaskList = append(resp.FailedTaskList, struct { + ErrorMsg string `json:"error_msg"` + TaskName string `json:"task_name"` + }{ + ErrorMsg: err.Error(), + TaskName: task.Name, + }) + } else { + resp.SuccessTaskList = append(resp.SuccessTaskList, task.Name) + } + } + c.IndentedJSON(http.StatusAccepted, resp) +} + +// DMAPICreateTaskConfig creates task_config_template. url is: (POST /api/v1/task/configs). +func (s *Server) DMAPICreateTaskConfig(c *gin.Context) { + task := &openapi.Task{} + if err := c.Bind(task); err != nil { + _ = c.Error(err) + return + } + if err := task.Adjust(); err != nil { + _ = c.Error(err) + return + } + // prepare target db config + newCtx := c.Request.Context() + toDBCfg := config.GetTargetDBCfgFromOpenAPITask(task) + if adjustDBErr := adjustTargetDB(newCtx, toDBCfg); adjustDBErr != nil { + _ = c.Error(terror.WithClass(adjustDBErr, terror.ClassDMMaster)) + return + } + if err := ha.PutOpenAPITaskConfig(s.etcdClient, *task, false); err != nil { + _ = c.Error(err) + return + } + c.IndentedJSON(http.StatusCreated, task) +} + +// DMAPIGetTaskConfigList gets the task_config_template list. url is: (GET /api/v1/task/configs). +func (s *Server) DMAPIGetTaskConfigList(c *gin.Context) { + TaskConfigList, err := ha.GetAllOpenAPITaskConfig(s.etcdClient) + if err != nil { + _ = c.Error(err) + return + } + taskList := make([]openapi.Task, len(TaskConfigList)) + for i, TaskConfig := range TaskConfigList { + taskList[i] = *TaskConfig + } + resp := openapi.GetTaskListResponse{Total: len(TaskConfigList), Data: taskList} + c.IndentedJSON(http.StatusOK, resp) +} + +// DMAPIDeleteTaskConfig deletes task_config_template. url is: (DELETE /api/v1/task/configs/{task-name}). +func (s *Server) DMAPIDeleteTaskConfig(c *gin.Context, taskName string) { + if err := ha.DeleteOpenAPITaskConfig(s.etcdClient, taskName); err != nil { + _ = c.Error(err) + return + } + c.Status(http.StatusNoContent) +} + +// DMAPIGetTaskConfig gets task_config_template. url is: (GET /api/v1/task/configs/{task-name}). +func (s *Server) DMAPIGetTaskConfig(c *gin.Context, taskName string) { + task, err := ha.GetOpenAPITaskConfig(s.etcdClient, taskName) + if err != nil { + _ = c.Error(err) + return + } + if task == nil { + _ = c.Error(terror.ErrOpenAPITaskConfigNotExist.Generate(taskName)) + return + } + c.IndentedJSON(http.StatusOK, task) +} + +// DMAPUpdateTaskConfig updates task_config_template. url is: (PUT /api/v1/task/configs/{task-name}).
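+// Updating a template that does not exist fails; the tests below exercise this case and expect ErrOpenAPITaskConfigNotExist.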
+func (s *Server) DMAPUpdateTaskConfig(c *gin.Context, taskName string) { + task := &openapi.Task{} + if err := c.Bind(task); err != nil { + _ = c.Error(err) + return + } + if err := task.Adjust(); err != nil { + _ = c.Error(err) + return + } + newCtx := c.Request.Context() + toDBCfg := config.GetTargetDBCfgFromOpenAPITask(task) + if adjustDBErr := adjustTargetDB(newCtx, toDBCfg); adjustDBErr != nil { + _ = c.Error(terror.WithClass(adjustDBErr, terror.ClassDMMaster)) + return + } + if err := ha.UpdateOpenAPITaskConfig(s.etcdClient, *task); err != nil { + _ = c.Error(err) + return + } + c.IndentedJSON(http.StatusOK, task) +} + func terrorHTTPErrorHandler() gin.HandlerFunc { return func(c *gin.Context) { c.Next() diff --git a/dm/dm/master/openapi_test.go b/dm/dm/master/openapi_test.go index a1f7b869e84..a139c43d9c0 100644 --- a/dm/dm/master/openapi_test.go +++ b/dm/dm/master/openapi_test.go @@ -496,6 +496,33 @@ func (t *openAPISuite) TestTaskAPI(c *check.C) { c.Assert(resultTaskList.Total, check.Equals, 1) c.Assert(resultTaskList.Data[0].Name, check.Equals, task.Name) + // test batch import task config + taskBatchImportURL := "/api/v1/task/configs/import" + req := openapi.TaskConfigRequest{Overwrite: false} + result = testutil.NewRequest().Post(taskBatchImportURL).WithJsonBody(req).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusAccepted) + var resp openapi.TaskConfigResponse + c.Assert(result.UnmarshalBodyToObject(&resp), check.IsNil) + c.Assert(resp.SuccessTaskList, check.HasLen, 1) + c.Assert(resp.SuccessTaskList[0], check.Equals, task.Name) + c.Assert(resp.FailedTaskList, check.HasLen, 0) + + // import again without overwrite will fail + result = testutil.NewRequest().Post(taskBatchImportURL).WithJsonBody(req).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusAccepted) + c.Assert(result.UnmarshalBodyToObject(&resp), check.IsNil) + c.Assert(resp.SuccessTaskList, check.HasLen, 0) + c.Assert(resp.FailedTaskList, check.HasLen, 1) + c.Assert(resp.FailedTaskList[0].TaskName, check.Equals, task.Name) + + // import again with overwrite will succeed + req.Overwrite = true + result = testutil.NewRequest().Post(taskBatchImportURL).WithJsonBody(req).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.UnmarshalBodyToObject(&resp), check.IsNil) + c.Assert(resp.SuccessTaskList, check.HasLen, 1) + c.Assert(resp.SuccessTaskList[0], check.Equals, task.Name) + c.Assert(resp.FailedTaskList, check.HasLen, 0) + // pause and resume task pauseTaskURL := fmt.Sprintf("%s/%s/pause", taskURL, task.Name) result = testutil.NewRequest().Post(pauseTaskURL).GoWithHTTPHandler(t.testT, s.openapiHandles) @@ -634,6 +661,112 @@ func (t *openAPISuite) TestClusterAPI(c *check.C) { cancel1() } +func (t *openAPISuite) TestTaskConfigsAPI(c *check.C) { + ctx, cancel := context.WithCancel(context.Background()) + s := setupServer(ctx, c) + c.Assert(failpoint.Enable("github.com/pingcap/tiflow/dm/dm/master/MockSkipAdjustTargetDB", `return(true)`), check.IsNil) + checker.CheckSyncConfigFunc = mockCheckSyncConfig + defer func() { + checker.CheckSyncConfigFunc = checker.CheckSyncConfig + cancel() + s.Close() + c.Assert(failpoint.Disable("github.com/pingcap/tiflow/dm/dm/master/MockSkipAdjustTargetDB"), check.IsNil) + }() + + dbCfg := config.GetDBConfigForTest() + source1 := openapi.Source{ + SourceName: source1Name, + EnableGtid: false, + Host: dbCfg.Host, + Password: dbCfg.Password, + Port: dbCfg.Port, + User:
dbCfg.User, + } + // create source + sourceURL := "/api/v1/sources" + result := testutil.NewRequest().Post(sourceURL).WithJsonBody(source1).GoWithHTTPHandler(t.testT, s.openapiHandles) + // check http status code + c.Assert(result.Code(), check.Equals, http.StatusCreated) + + // create task config template + url := "/api/v1/task/configs" + + task, err := fixtures.GenNoShardOpenAPITaskForTest() + c.Assert(err, check.IsNil) + // use a valid target db + task.TargetConfig.Host = dbCfg.Host + task.TargetConfig.Port = dbCfg.Port + task.TargetConfig.User = dbCfg.User + task.TargetConfig.Password = dbCfg.Password + + // create one + result = testutil.NewRequest().Post(url).WithJsonBody(task).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusCreated) + var createTaskResp openapi.Task + err = result.UnmarshalBodyToObject(&createTaskResp) + c.Assert(err, check.IsNil) + c.Assert(task.Name, check.Equals, createTaskResp.Name) + + // create again will fail + result = testutil.NewRequest().Post(url).WithJsonBody(task).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusBadRequest) + var errResp openapi.ErrorWithMessage + err = result.UnmarshalBodyToObject(&errResp) + c.Assert(err, check.IsNil) + c.Assert(errResp.ErrorCode, check.Equals, int(terror.ErrOpenAPITaskConfigExist.Code())) + + // list templates + result = testutil.NewRequest().Get(url).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusOK) + var resultTaskList openapi.GetTaskListResponse + err = result.UnmarshalBodyToObject(&resultTaskList) + c.Assert(err, check.IsNil) + c.Assert(resultTaskList.Total, check.Equals, 1) + c.Assert(resultTaskList.Data[0].Name, check.Equals, task.Name) + + // get detail + oneURL := fmt.Sprintf("%s/%s", url, task.Name) + result = testutil.NewRequest().Get(oneURL).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusOK) + var respTask openapi.Task + err = result.UnmarshalBodyToObject(&respTask) + c.Assert(err, check.IsNil) + c.Assert(respTask.Name, check.Equals, task.Name) + + // get not exist + notExistURL := fmt.Sprintf("%s/%s", url, "notexist") + result = testutil.NewRequest().Get(notExistURL).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusBadRequest) + err = result.UnmarshalBodyToObject(&errResp) + c.Assert(err, check.IsNil) + c.Assert(errResp.ErrorCode, check.Equals, int(terror.ErrOpenAPITaskConfigNotExist.Code())) + + // update + task.TaskMode = openapi.TaskTaskModeAll + result = testutil.NewRequest().Put(oneURL).WithJsonBody(task).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusOK) + err = result.UnmarshalBodyToObject(&respTask) + c.Assert(err, check.IsNil) + c.Assert(respTask.Name, check.Equals, task.Name) + + // update not exist will fail + task.Name = "notexist" + result = testutil.NewRequest().Put(notExistURL).WithJsonBody(task).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusBadRequest) + err = result.UnmarshalBodyToObject(&errResp) + c.Assert(err, check.IsNil) + c.Assert(errResp.ErrorCode, check.Equals, int(terror.ErrOpenAPITaskConfigNotExist.Code())) + + // delete task config template + result = testutil.NewRequest().Delete(oneURL).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusNoContent) + result = 
testutil.NewRequest().Get(url).GoWithHTTPHandler(t.testT, s.openapiHandles) + c.Assert(result.Code(), check.Equals, http.StatusOK) + err = result.UnmarshalBodyToObject(&resultTaskList) + c.Assert(err, check.IsNil) + c.Assert(resultTaskList.Total, check.Equals, 0) +} + func setupServer(ctx context.Context, c *check.C) *Server { // create a new cluster cfg1 := NewConfig() diff --git a/dm/dm/master/server.go b/dm/dm/master/server.go index ee4f3c0fcbf..e63331498c3 100644 --- a/dm/dm/master/server.go +++ b/dm/dm/master/server.go @@ -1536,6 +1536,8 @@ func (s *Server) removeMetaData(ctx context.Context, taskName, metaSchema string // clear loader and syncer checkpoints sqls = append(sqls, fmt.Sprintf("DROP TABLE IF EXISTS %s", dbutil.TableName(metaSchema, cputil.LoaderCheckpoint(taskName)))) + sqls = append(sqls, fmt.Sprintf("DROP TABLE IF EXISTS %s", + dbutil.TableName(metaSchema, cputil.LightningCheckpoint(taskName)))) sqls = append(sqls, fmt.Sprintf("DROP TABLE IF EXISTS %s", dbutil.TableName(metaSchema, cputil.SyncerCheckpoint(taskName)))) sqls = append(sqls, fmt.Sprintf("DROP TABLE IF EXISTS %s", diff --git a/dm/dm/master/server_test.go b/dm/dm/master/server_test.go index cef3ef82bc0..bacd9088907 100644 --- a/dm/dm/master/server_test.go +++ b/dm/dm/master/server_test.go @@ -985,6 +985,7 @@ func (t *testMaster) TestStartTaskWithRemoveMeta(c *check.C) { mock := conn.InitMockDB(c) mock.ExpectBegin() mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.LoaderCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.LightningCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerShardMeta(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerOnlineDDL(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) @@ -1077,6 +1078,7 @@ func (t *testMaster) TestStartTaskWithRemoveMeta(c *check.C) { mock = conn.InitMockDB(c) mock.ExpectBegin() mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.LoaderCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.LightningCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerShardMeta(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerOnlineDDL(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1)) diff --git a/dm/dm/worker/config.go b/dm/dm/worker/config.go index 30947e5dcd6..68793a41328 100644 --- a/dm/dm/worker/config.go +++ b/dm/dm/worker/config.go @@ -99,6 +99,8 @@ type Config struct { KeepAliveTTL int64 `toml:"keepalive-ttl" json:"keepalive-ttl"` RelayKeepAliveTTL int64 `toml:"relay-keepalive-ttl" json:"relay-keepalive-ttl"` + RelayDir string `toml:"relay-dir" json:"relay-dir"` + // tls config config.Security diff --git 
a/dm/dm/worker/dm-worker.toml b/dm/dm/worker/dm-worker.toml index 2440d0773ec..667ef3eb627 100644 --- a/dm/dm/worker/dm-worker.toml +++ b/dm/dm/worker/dm-worker.toml @@ -8,3 +8,5 @@ log-file = "dm-worker.log" worker-addr = ":8262" advertise-addr = "127.0.0.1:8262" join = "127.0.0.1:8261" + +relay-dir = "/tmp/relay" diff --git a/dm/dm/worker/server.go b/dm/dm/worker/server.go index 3ffd3984fdb..da2952c6271 100644 --- a/dm/dm/worker/server.go +++ b/dm/dm/worker/server.go @@ -822,7 +822,7 @@ func (s *Server) getOrStartWorker(cfg *config.SourceConfig, needLock bool) (*Sou } log.L().Info("will start a new worker", zap.String("sourceID", cfg.SourceID)) - w, err := NewSourceWorker(cfg, s.etcdClient, s.cfg.Name) + w, err := NewSourceWorker(cfg, s.etcdClient, s.cfg.Name, s.cfg.RelayDir) if err != nil { return nil, err } diff --git a/dm/dm/worker/source_worker.go b/dm/dm/worker/source_worker.go index d1b67ca8a7c..607c5f530eb 100644 --- a/dm/dm/worker/source_worker.go +++ b/dm/dm/worker/source_worker.go @@ -16,6 +16,7 @@ package worker import ( "context" "fmt" + "path/filepath" "sync" "time" @@ -77,6 +78,7 @@ type SourceWorker struct { relayWg sync.WaitGroup relayHolder RelayHolder relayPurger relay.Purger + relayDir string startedRelayBySourceCfg bool @@ -89,13 +91,19 @@ type SourceWorker struct { // NewSourceWorker creates a new SourceWorker. The functionality of relay and subtask is disabled by default, need call EnableRelay // and EnableSubtask later. -func NewSourceWorker(cfg *config.SourceConfig, etcdClient *clientv3.Client, name string) (w *SourceWorker, err error) { +func NewSourceWorker( + cfg *config.SourceConfig, + etcdClient *clientv3.Client, + name string, + relayDir string, +) (w *SourceWorker, err error) { w = &SourceWorker{ cfg: cfg, subTaskHolder: newSubTaskHolder(), l: log.With(zap.String("component", "worker controller")), etcdClient: etcdClient, name: name, + relayDir: relayDir, } // keep running until canceled in `Close`. w.ctx, w.cancel = context.WithCancel(context.Background()) @@ -334,6 +342,13 @@ func (w *SourceWorker) EnableRelay(startBySourceCfg bool) (err error) { } // 2. 
initial relay holder, the cfg's password need decrypt + // worker's relay-dir has higher priority than source's relay-dir + if w.relayDir != "" { + workerRelayDir := filepath.Join(w.relayDir, w.name) + log.L().Info("use worker's relay-dir", zap.String("RelayDir", workerRelayDir)) + w.cfg.RelayDir = workerRelayDir + } + w.relayHolder = NewRelayHolder(w.cfg) relayPurger, err := w.relayHolder.Init(w.relayCtx, []relay.PurgeInterceptor{ w, diff --git a/dm/dm/worker/source_worker_test.go b/dm/dm/worker/source_worker_test.go index 45335adf835..5dd29bce6c9 100644 --- a/dm/dm/worker/source_worker_test.go +++ b/dm/dm/worker/source_worker_test.go @@ -79,12 +79,12 @@ func (t *testServer) testWorker(c *C) { defer func() { NewRelayHolder = NewRealRelayHolder }() - w, err := NewSourceWorker(cfg, etcdCli, "") + w, err := NewSourceWorker(cfg, etcdCli, "", "") c.Assert(err, IsNil) c.Assert(w.EnableRelay(false), ErrorMatches, "init error") NewRelayHolder = NewDummyRelayHolder - w, err = NewSourceWorker(cfg, etcdCli, "") + w, err = NewSourceWorker(cfg, etcdCli, "", "") c.Assert(err, IsNil) c.Assert(w.GetUnitAndSourceStatusJSON("", nil), HasLen, emptyWorkerStatusInfoJSONLength) @@ -292,7 +292,7 @@ func (t *testWorkerFunctionalities) TestWorkerFunctionalities(c *C) { c.Assert(err, IsNil) // start worker - w, err := NewSourceWorker(sourceCfg, etcdCli, "") + w, err := NewSourceWorker(sourceCfg, etcdCli, "", "") c.Assert(err, IsNil) defer w.Close() go func() { @@ -463,7 +463,7 @@ func (t *testWorkerEtcdCompact) TestWatchSubtaskStageEtcdCompact(c *C) { sourceCfg.EnableRelay = false // step 1: start worker - w, err := NewSourceWorker(sourceCfg, etcdCli, "") + w, err := NewSourceWorker(sourceCfg, etcdCli, "", "") c.Assert(err, IsNil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -582,7 +582,7 @@ func (t *testWorkerEtcdCompact) TestWatchRelayStageEtcdCompact(c *C) { sourceCfg.MetaDir = c.MkDir() // step 1: start worker - w, err := NewSourceWorker(sourceCfg, etcdCli, "") + w, err := NewSourceWorker(sourceCfg, etcdCli, "", "") c.Assert(err, IsNil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/dm/dm/worker/task_checker_test.go b/dm/dm/worker/task_checker_test.go index 181452b70f7..5a58a16a5eb 100644 --- a/dm/dm/worker/task_checker_test.go +++ b/dm/dm/worker/task_checker_test.go @@ -89,7 +89,7 @@ func (s *testTaskCheckerSuite) TestCheck(c *check.C) { cfg := loadSourceConfigWithoutPassword(c) cfg.RelayDir = dir cfg.MetaDir = dir - w, err := NewSourceWorker(cfg, nil, "") + w, err := NewSourceWorker(cfg, nil, "", "") c.Assert(err, check.IsNil) w.closed.Store(false) @@ -204,7 +204,7 @@ func (s *testTaskCheckerSuite) TestCheckTaskIndependent(c *check.C) { cfg := loadSourceConfigWithoutPassword(c) cfg.RelayDir = dir cfg.MetaDir = dir - w, err := NewSourceWorker(cfg, nil, "") + w, err := NewSourceWorker(cfg, nil, "", "") c.Assert(err, check.IsNil) w.closed.Store(false) diff --git a/dm/loader/checkpoint.go b/dm/loader/checkpoint.go index a16ca4ef9df..c99afa2a34c 100644 --- a/dm/loader/checkpoint.go +++ b/dm/loader/checkpoint.go @@ -32,10 +32,6 @@ import ( "go.uber.org/zap" ) -const ( - LightningCheckpointListName = "lightning_checkpoint_list" -) - // CheckPoint represents checkpoint status. type CheckPoint interface { // Load loads all checkpoints recorded before. 
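The next hunk replaces the shared `lightning_checkpoint_list` table with a per-task checkpoint table keyed by `(task_name, source_name)` and adds a three-state load status. A minimal sketch of the round-trip between the status enum and its persisted string form, assuming the definitions added in that hunk:

```go
// Assumes lightingLoadStatus, its String method, and parseLightningLoadStatus
// from the hunk below are in scope.
func statusRoundTripOK() bool {
	for _, s := range []lightingLoadStatus{
		lightningStatusInit, lightningStatusRunning, lightningStatusFinished,
	} {
		if parseLightningLoadStatus(s.String()) != s {
			return false
		}
	}
	return true // all three states survive the String/parse round-trip
}
```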
@@ -470,19 +466,58 @@ func (cp *RemoteCheckPoint) String() string { return string(bytes) } +type lightingLoadStatus int + +const ( + lightningStatusInit lightingLoadStatus = iota + lightningStatusRunning + lightningStatusFinished +) + +func (s lightingLoadStatus) String() string { + switch s { + case lightningStatusInit: + return "init" + case lightningStatusRunning: + return "running" + case lightningStatusFinished: + return "finished" + default: + panic(fmt.Sprintf("unknown lightning load status '%d'", s)) + } +} + +func parseLightningLoadStatus(s string) lightingLoadStatus { + switch s { + case "running": + return lightningStatusRunning + case "finished": + return lightningStatusFinished + case "init": + return lightningStatusInit + default: + log.L().Warn("unknown lightning load status, will fall back to init", zap.String("status", s)) + return lightningStatusInit + } +} + type LightningCheckpointList struct { - db *conn.BaseDB - schema string - tableName string - logger log.Logger + db *conn.BaseDB + schema string + tableName string + taskName string + sourceName string + logger log.Logger } -func NewLightningCheckpointList(db *conn.BaseDB, metaSchema string) *LightningCheckpointList { +func NewLightningCheckpointList(db *conn.BaseDB, taskName, sourceName, metaSchema string) *LightningCheckpointList { return &LightningCheckpointList{ - db: db, - schema: dbutil.ColumnName(metaSchema), - tableName: dbutil.TableName(metaSchema, LightningCheckpointListName), - logger: log.L().WithFields(zap.String("component", "lightning checkpoint database list")), + db: db, + schema: dbutil.ColumnName(metaSchema), + tableName: dbutil.TableName(metaSchema, cputil.LightningCheckpoint(taskName)), + taskName: taskName, + sourceName: sourceName, + logger: log.L().WithFields(zap.String("component", "lightning checkpoint database list")), } } @@ -491,6 +526,8 @@ func (cp *LightningCheckpointList) Prepare(ctx context.Context) error { if err != nil { return terror.WithScope(terror.Annotate(err, "initialize connection when prepare"), terror.ScopeDownstream) } + defer conn.CloseBaseConnWithoutErr(cp.db, connection) + createSchema := fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %s", cp.schema) tctx := tcontext.NewContext(ctx, log.With(zap.String("job", "lightning-checkpoint"))) _, err = connection.ExecuteSQL(tctx, nil, "lightning-checkpoint", []string{createSchema}) @@ -498,9 +535,10 @@ return err } createTable := `CREATE TABLE IF NOT EXISTS %s ( - worker_name varchar(255) NOT NULL, task_name varchar(255) NOT NULL, - PRIMARY KEY (task_name, worker_name) + source_name varchar(255) NOT NULL, + status varchar(10) NOT NULL DEFAULT 'init' COMMENT 'init,running,finished', + PRIMARY KEY (task_name, source_name) ); ` sql2 := fmt.Sprintf(createTable, cp.tableName) @@ -508,18 +546,18 @@ return terror.WithScope(err, terror.ScopeDownstream) } -func (cp *LightningCheckpointList) RegisterCheckPoint(ctx context.Context, workerName, taskName string) error { +func (cp *LightningCheckpointList) RegisterCheckPoint(ctx context.Context) error { connection, err := cp.db.GetBaseConn(ctx) if err != nil { return terror.WithScope(terror.Annotate(err, "initialize connection"), terror.ScopeDownstream) } + defer conn.CloseBaseConnWithoutErr(cp.db, connection) - sql := fmt.Sprintf("INSERT IGNORE INTO %s (`worker_name`, `task_name`) VALUES(?,?)", cp.tableName) + sql := fmt.Sprintf("INSERT IGNORE INTO %s
(`task_name`, `source_name`) VALUES (?, ?)", cp.tableName) cp.logger.Info("initial checkpoint record", - zap.String("sql", sql), - zap.String("worker-name", workerName), - zap.String("task-name", taskName)) - args := []interface{}{workerName, taskName} + zap.String("task", cp.taskName), + zap.String("source", cp.sourceName)) + args := []interface{}{cp.taskName, cp.sourceName} tctx := tcontext.NewContext(ctx, log.With(zap.String("job", "lightning-checkpoint"))) _, err = connection.ExecuteSQL(tctx, nil, "lightning-checkpoint", []string{sql}, args) if err != nil { @@ -528,35 +566,49 @@ func (cp *LightningCheckpointList) RegisterCheckPoint(ctx context.Context, worke return nil } -func (cp *LightningCheckpointList) RemoveTaskCheckPoint(ctx context.Context, taskName string) error { +func (cp *LightningCheckpointList) UpdateStatus(ctx context.Context, status lightingLoadStatus) error { connection, err := cp.db.GetBaseConn(ctx) if err != nil { return terror.WithScope(terror.Annotate(err, "initialize connection"), terror.ScopeDownstream) } + defer conn.CloseBaseConnWithoutErr(cp.db, connection) + sql := fmt.Sprintf("UPDATE %s set status = ? WHERE `task_name` = ? AND `source_name` = ?", cp.tableName) + cp.logger.Info("update lightning loader status", + zap.String("task", cp.taskName), zap.String("source", cp.sourceName), + zap.Stringer("status", status)) tctx := tcontext.NewContext(ctx, log.With(zap.String("job", "lightning-checkpoint"))) - query := fmt.Sprintf("SELECT `worker_name` from %s where `task_name`=?", cp.tableName) - rows, err := connection.QuerySQL(tctx, query, taskName) + _, err = connection.ExecuteSQL(tctx, nil, "lightning-checkpoint", []string{sql}, + []interface{}{status.String(), cp.taskName, cp.sourceName}) if err != nil { - return terror.WithScope(err, terror.ScopeDownstream) + return terror.WithScope(terror.Annotate(err, "update lightning status"), terror.ScopeDownstream) + } + return nil +} + +func (cp *LightningCheckpointList) taskStatus(ctx context.Context) (lightingLoadStatus, error) { + connection, err := cp.db.GetBaseConn(ctx) + if err != nil { + return lightningStatusInit, terror.WithScope(terror.Annotate(err, "initialize connection"), terror.ScopeDownstream) + } + defer conn.CloseBaseConnWithoutErr(cp.db, connection) + + query := fmt.Sprintf("SELECT status FROM %s WHERE `task_name` = ? 
AND `source_name` = ?", cp.tableName) + tctx := tcontext.NewContext(ctx, log.With(zap.String("job", "lightning-checkpoint"))) + rows, err := connection.QuerySQL(tctx, query, cp.taskName, cp.sourceName) + if err != nil { + return lightningStatusInit, err } defer rows.Close() - var workerName string - for rows.Next() { - err = rows.Scan(&workerName) - if err != nil { - return terror.WithScope(terror.DBErrorAdapt(err, terror.ErrDBDriverError), terror.ScopeDownstream) - } - cpdb := config.TiDBLightningCheckpointPrefix + dbutil.TableName(workerName, taskName) - sql := fmt.Sprintf("DROP DATABASE IF NOT EXISTS %s", cpdb) - _, err = connection.ExecuteSQL(tctx, nil, "lightning-checkpoint", []string{sql}) - if err != nil { - return terror.WithScope(err, terror.ScopeDownstream) + if rows.Next() { + var status string + if err = rows.Scan(&status); err != nil { + return lightningStatusInit, terror.WithScope(err, terror.ScopeDownstream) } + return parseLightningLoadStatus(status), nil } - query = fmt.Sprintf("DELETE from %s where `task_name`=?", cp.tableName) - _, err = connection.ExecuteSQL(tctx, nil, "lightning-checkpoint", []string{query}, []interface{}{taskName}) - return terror.WithScope(err, terror.ScopeDownstream) + // status row doesn't exist, return default value + return lightningStatusInit, nil } // Close implements CheckPoint.Close. diff --git a/dm/loader/checkpoint_test.go b/dm/loader/checkpoint_test.go index 976d984bcdc..7fe2318aec3 100644 --- a/dm/loader/checkpoint_test.go +++ b/dm/loader/checkpoint_test.go @@ -14,6 +14,8 @@ package loader import ( + "context" + "database/sql" "fmt" "os" "strconv" @@ -27,7 +29,10 @@ import ( "github.com/pingcap/tiflow/dm/pkg/cputil" ) -var _ = Suite(&testCheckPointSuite{}) +var ( + _ = Suite(&testCheckPointSuite{}) + _ = Suite(&lightningCpListSuite{}) +) var ( schemaCreateSQL = "" @@ -259,3 +264,75 @@ func (t *testCheckPointSuite) TestDeepCopy(c *C) { cp.restoringFiles.pos["db"]["table"]["file3"] = []int64{0, 100} c.Assert(ret, DeepEquals, map[string][]int64{"file": {10, 100}, "file2": {0, 100}}) } + +type lightningCpListSuite struct { + mock sqlmock.Sqlmock + cpList *LightningCheckpointList +} + +func (s *lightningCpListSuite) SetUpTest(c *C) { + s.mock = conn.InitMockDB(c) + + baseDB, err := conn.DefaultDBProvider.Apply(&config.DBConfig{}) + c.Assert(err, IsNil) + + metaSchema := "dm_meta" + cpList := NewLightningCheckpointList(baseDB, "test_lightning", "source1", metaSchema) + + s.cpList = cpList +} + +func (s *lightningCpListSuite) TearDownTest(c *C) { + c.Assert(s.mock.ExpectationsWereMet(), IsNil) +} + +func (s *lightningCpListSuite) TestLightningCheckpointListPrepare(c *C) { + ctx := context.Background() + s.mock.ExpectBegin() + s.mock.ExpectExec(fmt.Sprintf("CREATE SCHEMA IF NOT EXISTS %s.*", s.cpList.schema)).WillReturnResult(sqlmock.NewResult(1, 1)) + s.mock.ExpectCommit() + s.mock.ExpectBegin() + s.mock.ExpectExec(fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.*", s.cpList.tableName)).WillReturnResult(sqlmock.NewResult(1, 1)) + s.mock.ExpectCommit() + err := s.cpList.Prepare(ctx) + c.Assert(err, IsNil) +} + +func (s *lightningCpListSuite) TestLightningCheckpointListStatusInit(c *C) { + // no rows in target table, will return default status + s.mock.ExpectQuery(fmt.Sprintf("SELECT status FROM %s WHERE `task_name` = \\? AND `source_name` = \\?", s.cpList.tableName)). + WithArgs(s.cpList.taskName, s.cpList.sourceName). 
+ WillReturnRows(sqlmock.NewRows([]string{"status"}).RowError(0, sql.ErrNoRows)) + status, err := s.cpList.taskStatus(context.Background()) + c.Assert(err, IsNil) + c.Assert(status, Equals, lightningStatusInit) +} + +func (s *lightningCpListSuite) TestLightningCheckpointListStatusRunning(c *C) { + s.mock.ExpectQuery(fmt.Sprintf("SELECT status FROM %s WHERE `task_name` = \\? AND `source_name` = \\?", s.cpList.tableName)). + WithArgs(s.cpList.taskName, s.cpList.sourceName). + WillReturnRows(sqlmock.NewRows([]string{"status"}).AddRow("running")) + status, err := s.cpList.taskStatus(context.Background()) + c.Assert(err, IsNil) + c.Assert(status, Equals, lightningStatusRunning) +} + +func (s *lightningCpListSuite) TestLightningCheckpointListRegister(c *C) { + s.mock.ExpectBegin() + s.mock.ExpectExec(fmt.Sprintf("INSERT IGNORE INTO %s \\(`task_name`, `source_name`\\) VALUES \\(\\?, \\?\\)", s.cpList.tableName)). + WithArgs(s.cpList.taskName, s.cpList.sourceName). + WillReturnResult(sqlmock.NewResult(2, 1)) + s.mock.ExpectCommit() + err := s.cpList.RegisterCheckPoint(context.Background()) + c.Assert(err, IsNil) +} + +func (s *lightningCpListSuite) TestLightningCheckpointListUpdateStatus(c *C) { + s.mock.ExpectBegin() + s.mock.ExpectExec(fmt.Sprintf("UPDATE %s set status = \\? WHERE `task_name` = \\? AND `source_name` = \\?", s.cpList.tableName)). + WithArgs("running", s.cpList.taskName, s.cpList.sourceName). + WillReturnResult(sqlmock.NewResult(3, 1)) + s.mock.ExpectCommit() + err := s.cpList.UpdateStatus(context.Background(), lightningStatusRunning) + c.Assert(err, IsNil) +} diff --git a/dm/loader/lightning.go b/dm/loader/lightning.go index f0a1529c02d..66b41fdf347 100644 --- a/dm/loader/lightning.go +++ b/dm/loader/lightning.go @@ -21,7 +21,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb-tools/pkg/dbutil" "github.com/pingcap/tidb/br/pkg/lightning" lcfg "github.com/pingcap/tidb/br/pkg/lightning/config" "go.etcd.io/etcd/clientv3" @@ -35,14 +34,13 @@ import ( "github.com/pingcap/tiflow/dm/pkg/conn" tcontext "github.com/pingcap/tiflow/dm/pkg/context" "github.com/pingcap/tiflow/dm/pkg/log" - "github.com/pingcap/tiflow/dm/pkg/terror" "github.com/pingcap/tiflow/dm/pkg/utils" ) const ( - lightningCheckpointFile = "lightning.checkpoint.0.sql" - lightningCheckpointDB = "lightning" - lightningCheckpointTable = "checkpoint" + // checkpoint file name for lightning loader + // this file is used to store the real checkpoint data for lightning. + lightningCheckpointFileName = "tidb_lightning_checkpoint.pb" ) // LightningLoader can load your mydumper data into TiDB database. 
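For context, a hedged sketch of the load-status lifecycle the new checkpoint list is meant to drive (init -> running -> finished, as wired up in the lightning.go hunks below). The function runLoadOnce and its run callback are hypothetical, and the snippet assumes the loader package's LightningCheckpointList, its lightningStatus* constants from this patch, and an imported context package:

// runLoadOnce (illustrative only) shows the intended call order around a
// single lightning import attempt for one (task, source) pair.
func runLoadOnce(ctx context.Context, cpList *LightningCheckpointList, run func(context.Context) error) error {
	status, err := cpList.taskStatus(ctx) // reads 'init' when no row exists yet
	if err != nil {
		return err
	}
	if status == lightningStatusFinished {
		return nil // a previous run already imported this (task, source)
	}
	// Idempotent on retries: RegisterCheckPoint uses INSERT IGNORE.
	if err := cpList.RegisterCheckPoint(ctx); err != nil {
		return err
	}
	if err := cpList.UpdateStatus(ctx, lightningStatusRunning); err != nil {
		return err
	}
	if err := run(ctx); err != nil {
		// status stays 'running'; the next attempt resumes via the file checkpoint
		return err
	}
	return cpList.UpdateStatus(ctx, lightningStatusFinished)
}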
@@ -53,7 +51,6 @@ type LightningLoader struct { lightningGlobalConfig *lcfg.GlobalConfig cfg *config.SubTaskConfig - checkPoint CheckPoint checkPointList *LightningCheckpointList logger log.Logger @@ -129,15 +126,10 @@ func (l *LightningLoader) Init(ctx context.Context) (err error) { return err } - checkpoint, err := newRemoteCheckPoint(tctx, l.cfg, l.checkpointID()) + checkpointList := NewLightningCheckpointList(l.toDB, l.cfg.Name, l.cfg.SourceID, l.cfg.MetaSchema) + err = checkpointList.Prepare(ctx) if err == nil { - l.checkPoint = checkpoint - checkpointList := NewLightningCheckpointList(l.toDB, l.cfg.MetaSchema) - err1 := checkpointList.Prepare(ctx) - if err1 == nil { - l.checkPointList = checkpointList - } - err = err1 + l.checkPointList = checkpointList } failpoint.Inject("ignoreLoadCheckpointErr", func(_ failpoint.Value) { l.logger.Info("", zap.String("failpoint", "ignoreLoadCheckpointErr")) @@ -164,6 +156,9 @@ func (l *LightningLoader) runLightning(ctx context.Context, cfg *lcfg.Config) er l.Lock() l.cancel = cancel l.Unlock() + if err := l.checkPointList.UpdateStatus(ctx, lightningStatusRunning); err != nil { + return err + } err := l.core.RunOnce(taskCtx, cfg, nil) failpoint.Inject("LightningLoadDataSlowDown", nil) failpoint.Inject("LightningLoadDataSlowDownByTask", func(val failpoint.Value) { @@ -180,39 +175,27 @@ func (l *LightningLoader) runLightning(ctx context.Context, cfg *lcfg.Config) er } func (l *LightningLoader) restore(ctx context.Context) error { - tctx := tcontext.NewContext(ctx, l.logger) if err := putLoadTask(l.cli, l.cfg, l.workerName); err != nil { return err } - if err := l.checkPoint.Init(tctx, lightningCheckpointFile, 1); err != nil { - return err - } - if err := l.checkPoint.Load(tctx); err != nil { - return err - } - db2Tables := make(map[string]Tables2DataFiles) - tables := make(Tables2DataFiles) - files := make(DataFiles, 0, 1) - files = append(files, lightningCheckpointFile) - tables[lightningCheckpointTable] = files - db2Tables[lightningCheckpointDB] = tables - var err error - if err = l.checkPoint.CalcProgress(db2Tables); err != nil { + + status, err := l.checkPointList.taskStatus(ctx) + if err != nil { return err } - if !l.checkPoint.IsTableFinished(lightningCheckpointDB, lightningCheckpointTable) { - if l.checkPointList != nil { - if err = l.checkPointList.RegisterCheckPoint(ctx, l.workerName, l.cfg.Name); err != nil { - return err - } + + if status < lightningStatusFinished { + if err = l.checkPointList.RegisterCheckPoint(ctx); err != nil { + return err } cfg := lcfg.NewConfig() if err = cfg.LoadFromGlobal(l.lightningGlobalConfig); err != nil { return err } cfg.Routes = l.cfg.RouteRules - cfg.Checkpoint.Driver = lcfg.CheckpointDriverMySQL - cfg.Checkpoint.Schema = config.TiDBLightningCheckpointPrefix + dbutil.TableName(l.workerName, l.cfg.Name) + cfg.Checkpoint.Driver = lcfg.CheckpointDriverFile + cpPath := filepath.Join(l.cfg.LoaderConfig.Dir, lightningCheckpointFileName) + cfg.Checkpoint.DSN = cpPath cfg.Checkpoint.KeepAfterSuccess = lcfg.CheckpointOrigin cfg.TiDB.Vars = make(map[string]string) if l.cfg.To.Session != nil { @@ -223,21 +206,9 @@ func (l *LightningLoader) restore(ctx context.Context) error { cfg.TiDB.StrSQLMode = l.cfg.LoaderConfig.SQLMode cfg.TiDB.Vars = map[string]string{"time_zone": l.timeZone} err = l.runLightning(ctx, cfg) - if err == nil { - // lightning will auto deregister tls when task done, so we need to register it again for removing checkpoint - if l.cfg.To.Security != nil { - if registerErr := 
cfg.Security.RegisterMySQL(); registerErr != nil {
-				return terror.ErrConnRegistryTLSConfig.Delegate(registerErr)
-			}
-			defer cfg.Security.DeregisterMySQL()
-		}
-		err = lightning.CheckpointRemove(ctx, cfg, "all")
-	}
 	if err == nil {
 		l.finish.Store(true)
-		offsetSQL := l.checkPoint.GenSQL(lightningCheckpointFile, 1)
-		err = l.toDBConns[0].executeSQL(tctx, []string{offsetSQL})
-		_ = l.checkPoint.UpdateOffset(lightningCheckpointFile, 1)
+		err = l.checkPointList.UpdateStatus(ctx, lightningStatusFinished)
 	} else {
 		l.logger.Error("failed to run lightning", zap.Error(err))
 	}
@@ -303,14 +274,13 @@ func (l *LightningLoader) isClosed() bool {
 
 // IsFreshTask implements Unit.IsFreshTask.
 func (l *LightningLoader) IsFreshTask(ctx context.Context) (bool, error) {
-	count, err := l.checkPoint.Count(tcontext.NewContext(ctx, l.logger))
-	return count == 0, err
+	status, err := l.checkPointList.taskStatus(ctx)
+	return status == lightningStatusInit, err
 }
 
 // Close does graceful shutdown.
 func (l *LightningLoader) Close() {
 	l.Pause()
-	l.checkPoint.Close()
 	l.checkPointList.Close()
 	l.closed.Store(true)
 }
@@ -367,16 +337,3 @@ func (l *LightningLoader) Status(_ *binlog.SourceStatus) interface{} {
 	}
 	return s
 }
-
-// checkpointID returns ID which used for checkpoint table.
-func (l *LightningLoader) checkpointID() string {
-	if len(l.cfg.SourceID) > 0 {
-		return l.cfg.SourceID
-	}
-	dir, err := filepath.Abs(l.cfg.Dir)
-	if err != nil {
-		l.logger.Warn("get abs dir", zap.String("directory", l.cfg.Dir), log.ShortError(err))
-		return l.cfg.Dir
-	}
-	return shortSha1(dir)
-}
diff --git a/dm/openapi/gen.client.go b/dm/openapi/gen.client.go
index 3075c625ac1..d4aebd80ee9 100644
--- a/dm/openapi/gen.client.go
+++ b/dm/openapi/gen.client.go
@@ -149,6 +149,28 @@ type ClientInterface interface {
 	DMAPITransferSource(ctx context.Context, sourceName string, body DMAPITransferSourceJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error)
 
+	// DMAPIGetTaskConfigList request
+	DMAPIGetTaskConfigList(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error)
+
+	// DMAPICreateTaskConfig request with any body
+	DMAPICreateTaskConfigWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error)
+
+	DMAPICreateTaskConfig(ctx context.Context, body DMAPICreateTaskConfigJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error)
+
+	// DMAPIImportTaskConfig request with any body
+	DMAPIImportTaskConfigWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error)
+
+	DMAPIImportTaskConfig(ctx context.Context, body DMAPIImportTaskConfigJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error)
+
+	// DMAPIDeleteTaskConfig request
+	DMAPIDeleteTaskConfig(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*http.Response, error)
+
+	// DMAPIGetTaskConfig request
+	DMAPIGetTaskConfig(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*http.Response, error)
+
+	// DMAPUpdateTaskConfig request
+	DMAPUpdateTaskConfig(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*http.Response, error)
+
 	// DMAPIGetTaskList request
 	DMAPIGetTaskList(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error)
 
@@ -443,6 +465,102 @@ func (c *Client) DMAPITransferSource(ctx context.Context, sourceName string, bod
 	return c.Client.Do(req)
 }
 
+func (c *Client) DMAPIGetTaskConfigList(ctx context.Context, reqEditors
...RequestEditorFn) (*http.Response, error) { + req, err := NewDMAPIGetTaskConfigListRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DMAPICreateTaskConfigWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDMAPICreateTaskConfigRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DMAPICreateTaskConfig(ctx context.Context, body DMAPICreateTaskConfigJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDMAPICreateTaskConfigRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DMAPIImportTaskConfigWithBody(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDMAPIImportTaskConfigRequestWithBody(c.Server, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DMAPIImportTaskConfig(ctx context.Context, body DMAPIImportTaskConfigJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDMAPIImportTaskConfigRequest(c.Server, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DMAPIDeleteTaskConfig(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDMAPIDeleteTaskConfigRequest(c.Server, taskName) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DMAPIGetTaskConfig(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDMAPIGetTaskConfigRequest(c.Server, taskName) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) DMAPUpdateTaskConfig(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewDMAPUpdateTaskConfigRequest(c.Server, taskName) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) DMAPIGetTaskList(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewDMAPIGetTaskListRequest(c.Server) if err != nil { @@ -1254,8 +1372,8 @@ func NewDMAPITransferSourceRequestWithBody(server string, sourceName string, con return req, nil } -// NewDMAPIGetTaskListRequest generates requests for DMAPIGetTaskList -func NewDMAPIGetTaskListRequest(server string) (*http.Request, error) 
{ +// NewDMAPIGetTaskConfigListRequest generates requests for DMAPIGetTaskConfigList +func NewDMAPIGetTaskConfigListRequest(server string) (*http.Request, error) { var err error serverURL, err := url.Parse(server) @@ -1263,7 +1381,7 @@ func NewDMAPIGetTaskListRequest(server string) (*http.Request, error) { return nil, err } - operationPath := fmt.Sprintf("/api/v1/tasks") + operationPath := fmt.Sprintf("/api/v1/task/configs") if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -1281,19 +1399,19 @@ func NewDMAPIGetTaskListRequest(server string) (*http.Request, error) { return req, nil } -// NewDMAPIStartTaskRequest calls the generic DMAPIStartTask builder with application/json body -func NewDMAPIStartTaskRequest(server string, body DMAPIStartTaskJSONRequestBody) (*http.Request, error) { +// NewDMAPICreateTaskConfigRequest calls the generic DMAPICreateTaskConfig builder with application/json body +func NewDMAPICreateTaskConfigRequest(server string, body DMAPICreateTaskConfigJSONRequestBody) (*http.Request, error) { var bodyReader io.Reader buf, err := json.Marshal(body) if err != nil { return nil, err } bodyReader = bytes.NewReader(buf) - return NewDMAPIStartTaskRequestWithBody(server, "application/json", bodyReader) + return NewDMAPICreateTaskConfigRequestWithBody(server, "application/json", bodyReader) } -// NewDMAPIStartTaskRequestWithBody generates requests for DMAPIStartTask with any type of body -func NewDMAPIStartTaskRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { +// NewDMAPICreateTaskConfigRequestWithBody generates requests for DMAPICreateTaskConfig with any type of body +func NewDMAPICreateTaskConfigRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { var err error serverURL, err := url.Parse(server) @@ -1301,7 +1419,7 @@ func NewDMAPIStartTaskRequestWithBody(server string, contentType string, body io return nil, err } - operationPath := fmt.Sprintf("/api/v1/tasks") + operationPath := fmt.Sprintf("/api/v1/task/configs") if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -1321,86 +1439,27 @@ func NewDMAPIStartTaskRequestWithBody(server string, contentType string, body io return req, nil } -// NewDMAPIDeleteTaskRequest generates requests for DMAPIDeleteTask -func NewDMAPIDeleteTaskRequest(server string, taskName string, params *DMAPIDeleteTaskParams) (*http.Request, error) { - var err error - - var pathParam0 string - - pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task-name", runtime.ParamLocationPath, taskName) - if err != nil { - return nil, err - } - - serverURL, err := url.Parse(server) - if err != nil { - return nil, err - } - - operationPath := fmt.Sprintf("/api/v1/tasks/%s", pathParam0) - if operationPath[0] == '/' { - operationPath = "." 
+ operationPath - } - - queryURL, err := serverURL.Parse(operationPath) - if err != nil { - return nil, err - } - - queryValues := queryURL.Query() - - if params.SourceNameList != nil { - if queryFrag, err := runtime.StyleParamWithLocation("form", true, "source_name_list", runtime.ParamLocationQuery, *params.SourceNameList); err != nil { - return nil, err - } else if parsed, err := url.ParseQuery(queryFrag); err != nil { - return nil, err - } else { - for k, v := range parsed { - for _, v2 := range v { - queryValues.Add(k, v2) - } - } - } - } - - queryURL.RawQuery = queryValues.Encode() - - req, err := http.NewRequest("DELETE", queryURL.String(), nil) - if err != nil { - return nil, err - } - - return req, nil -} - -// NewDMAPIPauseTaskRequest calls the generic DMAPIPauseTask builder with application/json body -func NewDMAPIPauseTaskRequest(server string, taskName string, body DMAPIPauseTaskJSONRequestBody) (*http.Request, error) { +// NewDMAPIImportTaskConfigRequest calls the generic DMAPIImportTaskConfig builder with application/json body +func NewDMAPIImportTaskConfigRequest(server string, body DMAPIImportTaskConfigJSONRequestBody) (*http.Request, error) { var bodyReader io.Reader buf, err := json.Marshal(body) if err != nil { return nil, err } bodyReader = bytes.NewReader(buf) - return NewDMAPIPauseTaskRequestWithBody(server, taskName, "application/json", bodyReader) + return NewDMAPIImportTaskConfigRequestWithBody(server, "application/json", bodyReader) } -// NewDMAPIPauseTaskRequestWithBody generates requests for DMAPIPauseTask with any type of body -func NewDMAPIPauseTaskRequestWithBody(server string, taskName string, contentType string, body io.Reader) (*http.Request, error) { +// NewDMAPIImportTaskConfigRequestWithBody generates requests for DMAPIImportTaskConfig with any type of body +func NewDMAPIImportTaskConfigRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { var err error - var pathParam0 string - - pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task-name", runtime.ParamLocationPath, taskName) - if err != nil { - return nil, err - } - serverURL, err := url.Parse(server) if err != nil { return nil, err } - operationPath := fmt.Sprintf("/api/v1/tasks/%s/pause", pathParam0) + operationPath := fmt.Sprintf("/api/v1/task/configs/import") if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1420,19 +1479,8 @@ func NewDMAPIPauseTaskRequestWithBody(server string, taskName string, contentTyp return req, nil } -// NewDMAPIResumeTaskRequest calls the generic DMAPIResumeTask builder with application/json body -func NewDMAPIResumeTaskRequest(server string, taskName string, body DMAPIResumeTaskJSONRequestBody) (*http.Request, error) { - var bodyReader io.Reader - buf, err := json.Marshal(body) - if err != nil { - return nil, err - } - bodyReader = bytes.NewReader(buf) - return NewDMAPIResumeTaskRequestWithBody(server, taskName, "application/json", bodyReader) -} - -// NewDMAPIResumeTaskRequestWithBody generates requests for DMAPIResumeTask with any type of body -func NewDMAPIResumeTaskRequestWithBody(server string, taskName string, contentType string, body io.Reader) (*http.Request, error) { +// NewDMAPIDeleteTaskConfigRequest generates requests for DMAPIDeleteTaskConfig +func NewDMAPIDeleteTaskConfigRequest(server string, taskName string) (*http.Request, error) { var err error var pathParam0 string @@ -1447,7 +1495,7 @@ func NewDMAPIResumeTaskRequestWithBody(server string, taskName string, contentTy return nil, err } - operationPath := fmt.Sprintf("/api/v1/tasks/%s/resume", pathParam0) + operationPath := fmt.Sprintf("/api/v1/task/configs/%s", pathParam0) if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -1457,18 +1505,16 @@ func NewDMAPIResumeTaskRequestWithBody(server string, taskName string, contentTy return nil, err } - req, err := http.NewRequest("POST", queryURL.String(), body) + req, err := http.NewRequest("DELETE", queryURL.String(), nil) if err != nil { return nil, err } - req.Header.Add("Content-Type", contentType) - return req, nil } -// NewDMAPIGetSchemaListByTaskAndSourceRequest generates requests for DMAPIGetSchemaListByTaskAndSource -func NewDMAPIGetSchemaListByTaskAndSourceRequest(server string, taskName string, sourceName string) (*http.Request, error) { +// NewDMAPIGetTaskConfigRequest generates requests for DMAPIGetTaskConfig +func NewDMAPIGetTaskConfigRequest(server string, taskName string) (*http.Request, error) { var err error var pathParam0 string @@ -1478,19 +1524,12 @@ func NewDMAPIGetSchemaListByTaskAndSourceRequest(server string, taskName string, return nil, err } - var pathParam1 string - - pathParam1, err = runtime.StyleParamWithLocation("simple", false, "source-name", runtime.ParamLocationPath, sourceName) - if err != nil { - return nil, err - } - serverURL, err := url.Parse(server) if err != nil { return nil, err } - operationPath := fmt.Sprintf("/api/v1/tasks/%s/sources/%s/schemas", pathParam0, pathParam1) + operationPath := fmt.Sprintf("/api/v1/task/configs/%s", pathParam0) if operationPath[0] == '/' { operationPath = "." 
+ operationPath } @@ -1508,8 +1547,8 @@ func NewDMAPIGetSchemaListByTaskAndSourceRequest(server string, taskName string, return req, nil } -// NewDMAPIGetTableListByTaskAndSourceRequest generates requests for DMAPIGetTableListByTaskAndSource -func NewDMAPIGetTableListByTaskAndSourceRequest(server string, taskName string, sourceName string, schemaName string) (*http.Request, error) { +// NewDMAPUpdateTaskConfigRequest generates requests for DMAPUpdateTaskConfig +func NewDMAPUpdateTaskConfigRequest(server string, taskName string) (*http.Request, error) { var err error var pathParam0 string @@ -1519,26 +1558,12 @@ func NewDMAPIGetTableListByTaskAndSourceRequest(server string, taskName string, return nil, err } - var pathParam1 string - - pathParam1, err = runtime.StyleParamWithLocation("simple", false, "source-name", runtime.ParamLocationPath, sourceName) - if err != nil { - return nil, err - } - - var pathParam2 string - - pathParam2, err = runtime.StyleParamWithLocation("simple", false, "schema-name", runtime.ParamLocationPath, schemaName) - if err != nil { - return nil, err - } - serverURL, err := url.Parse(server) if err != nil { return nil, err } - operationPath := fmt.Sprintf("/api/v1/tasks/%s/sources/%s/schemas/%s", pathParam0, pathParam1, pathParam2) + operationPath := fmt.Sprintf("/api/v1/task/configs/%s", pathParam0) if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -1548,7 +1573,7 @@ func NewDMAPIGetTableListByTaskAndSourceRequest(server string, taskName string, return nil, err } - req, err := http.NewRequest("GET", queryURL.String(), nil) + req, err := http.NewRequest("PUT", queryURL.String(), nil) if err != nil { return nil, err } @@ -1556,44 +1581,54 @@ func NewDMAPIGetTableListByTaskAndSourceRequest(server string, taskName string, return req, nil } -// NewDMAPIDeleteTableStructureRequest generates requests for DMAPIDeleteTableStructure -func NewDMAPIDeleteTableStructureRequest(server string, taskName string, sourceName string, schemaName string, tableName string) (*http.Request, error) { +// NewDMAPIGetTaskListRequest generates requests for DMAPIGetTaskList +func NewDMAPIGetTaskListRequest(server string) (*http.Request, error) { var err error - var pathParam0 string - - pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task-name", runtime.ParamLocationPath, taskName) + serverURL, err := url.Parse(server) if err != nil { return nil, err } - var pathParam1 string + operationPath := fmt.Sprintf("/api/v1/tasks") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } - pathParam1, err = runtime.StyleParamWithLocation("simple", false, "source-name", runtime.ParamLocationPath, sourceName) + queryURL, err := serverURL.Parse(operationPath) if err != nil { return nil, err } - var pathParam2 string - - pathParam2, err = runtime.StyleParamWithLocation("simple", false, "schema-name", runtime.ParamLocationPath, schemaName) + req, err := http.NewRequest("GET", queryURL.String(), nil) if err != nil { return nil, err } - var pathParam3 string + return req, nil +} - pathParam3, err = runtime.StyleParamWithLocation("simple", false, "table-name", runtime.ParamLocationPath, tableName) +// NewDMAPIStartTaskRequest calls the generic DMAPIStartTask builder with application/json body +func NewDMAPIStartTaskRequest(server string, body DMAPIStartTaskJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) if err != nil { return nil, err } + bodyReader = bytes.NewReader(buf) + return NewDMAPIStartTaskRequestWithBody(server, "application/json", bodyReader) +} + +// NewDMAPIStartTaskRequestWithBody generates requests for DMAPIStartTask with any type of body +func NewDMAPIStartTaskRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) { + var err error serverURL, err := url.Parse(server) if err != nil { return nil, err } - operationPath := fmt.Sprintf("/api/v1/tasks/%s/sources/%s/schemas/%s/%s", pathParam0, pathParam1, pathParam2, pathParam3) + operationPath := fmt.Sprintf("/api/v1/tasks") if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -1603,16 +1638,18 @@ func NewDMAPIDeleteTableStructureRequest(server string, taskName string, sourceN return nil, err } - req, err := http.NewRequest("DELETE", queryURL.String(), nil) + req, err := http.NewRequest("POST", queryURL.String(), body) if err != nil { return nil, err } + req.Header.Add("Content-Type", contentType) + return req, nil } -// NewDMAPIGetTableStructureRequest generates requests for DMAPIGetTableStructure -func NewDMAPIGetTableStructureRequest(server string, taskName string, sourceName string, schemaName string, tableName string) (*http.Request, error) { +// NewDMAPIDeleteTaskRequest generates requests for DMAPIDeleteTask +func NewDMAPIDeleteTaskRequest(server string, taskName string, params *DMAPIDeleteTaskParams) (*http.Request, error) { var err error var pathParam0 string @@ -1622,9 +1659,299 @@ func NewDMAPIGetTableStructureRequest(server string, taskName string, sourceName return nil, err } - var pathParam1 string - - pathParam1, err = runtime.StyleParamWithLocation("simple", false, "source-name", runtime.ParamLocationPath, sourceName) + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/tasks/%s", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + queryValues := queryURL.Query() + + if params.SourceNameList != nil { + if queryFrag, err := runtime.StyleParamWithLocation("form", true, "source_name_list", runtime.ParamLocationQuery, *params.SourceNameList); err != nil { + return nil, err + } else if parsed, err := url.ParseQuery(queryFrag); err != nil { + return nil, err + } else { + for k, v := range parsed { + for _, v2 := range v { + queryValues.Add(k, v2) + } + } + } + } + + queryURL.RawQuery = queryValues.Encode() + + req, err := http.NewRequest("DELETE", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewDMAPIPauseTaskRequest calls the generic DMAPIPauseTask builder with application/json body +func NewDMAPIPauseTaskRequest(server string, taskName string, body DMAPIPauseTaskJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewDMAPIPauseTaskRequestWithBody(server, taskName, "application/json", bodyReader) +} + +// NewDMAPIPauseTaskRequestWithBody generates requests for DMAPIPauseTask with any type of body +func NewDMAPIPauseTaskRequestWithBody(server string, taskName string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task-name", runtime.ParamLocationPath, taskName) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/tasks/%s/pause", pathParam0) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewDMAPIResumeTaskRequest calls the generic DMAPIResumeTask builder with application/json body +func NewDMAPIResumeTaskRequest(server string, taskName string, body DMAPIResumeTaskJSONRequestBody) (*http.Request, error) { + var bodyReader io.Reader + buf, err := json.Marshal(body) + if err != nil { + return nil, err + } + bodyReader = bytes.NewReader(buf) + return NewDMAPIResumeTaskRequestWithBody(server, taskName, "application/json", bodyReader) +} + +// NewDMAPIResumeTaskRequestWithBody generates requests for DMAPIResumeTask with any type of body +func NewDMAPIResumeTaskRequestWithBody(server string, taskName string, contentType string, body io.Reader) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task-name", runtime.ParamLocationPath, taskName) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/tasks/%s/resume", pathParam0) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + return req, nil +} + +// NewDMAPIGetSchemaListByTaskAndSourceRequest generates requests for DMAPIGetSchemaListByTaskAndSource +func NewDMAPIGetSchemaListByTaskAndSourceRequest(server string, taskName string, sourceName string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task-name", runtime.ParamLocationPath, taskName) + if err != nil { + return nil, err + } + + var pathParam1 string + + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "source-name", runtime.ParamLocationPath, sourceName) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/tasks/%s/sources/%s/schemas", pathParam0, pathParam1) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewDMAPIGetTableListByTaskAndSourceRequest generates requests for DMAPIGetTableListByTaskAndSource +func NewDMAPIGetTableListByTaskAndSourceRequest(server string, taskName string, sourceName string, schemaName string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task-name", runtime.ParamLocationPath, taskName) + if err != nil { + return nil, err + } + + var pathParam1 string + + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "source-name", runtime.ParamLocationPath, sourceName) + if err != nil { + return nil, err + } + + var pathParam2 string + + pathParam2, err = runtime.StyleParamWithLocation("simple", false, "schema-name", runtime.ParamLocationPath, schemaName) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/tasks/%s/sources/%s/schemas/%s", pathParam0, pathParam1, pathParam2) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewDMAPIDeleteTableStructureRequest generates requests for DMAPIDeleteTableStructure +func NewDMAPIDeleteTableStructureRequest(server string, taskName string, sourceName string, schemaName string, tableName string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task-name", runtime.ParamLocationPath, taskName) + if err != nil { + return nil, err + } + + var pathParam1 string + + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "source-name", runtime.ParamLocationPath, sourceName) + if err != nil { + return nil, err + } + + var pathParam2 string + + pathParam2, err = runtime.StyleParamWithLocation("simple", false, "schema-name", runtime.ParamLocationPath, schemaName) + if err != nil { + return nil, err + } + + var pathParam3 string + + pathParam3, err = runtime.StyleParamWithLocation("simple", false, "table-name", runtime.ParamLocationPath, tableName) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/tasks/%s/sources/%s/schemas/%s/%s", pathParam0, pathParam1, pathParam2, pathParam3) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("DELETE", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + +// NewDMAPIGetTableStructureRequest generates requests for DMAPIGetTableStructure +func NewDMAPIGetTableStructureRequest(server string, taskName string, sourceName string, schemaName string, tableName string) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "task-name", runtime.ParamLocationPath, taskName) + if err != nil { + return nil, err + } + + var pathParam1 string + + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "source-name", runtime.ParamLocationPath, sourceName) if err != nil { return nil, err } @@ -1888,6 +2215,28 @@ type ClientWithResponsesInterface interface { DMAPITransferSourceWithResponse(ctx context.Context, sourceName string, body DMAPITransferSourceJSONRequestBody, reqEditors ...RequestEditorFn) (*DMAPITransferSourceResponse, error) + // DMAPIGetTaskConfigList request + DMAPIGetTaskConfigListWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*DMAPIGetTaskConfigListResponse, error) + + // DMAPICreateTaskConfig request with any body + DMAPICreateTaskConfigWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*DMAPICreateTaskConfigResponse, error) + + DMAPICreateTaskConfigWithResponse(ctx context.Context, body DMAPICreateTaskConfigJSONRequestBody, reqEditors ...RequestEditorFn) (*DMAPICreateTaskConfigResponse, error) + + // DMAPIImportTaskConfig request with any body + DMAPIImportTaskConfigWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*DMAPIImportTaskConfigResponse, error) + + DMAPIImportTaskConfigWithResponse(ctx context.Context, body DMAPIImportTaskConfigJSONRequestBody, reqEditors ...RequestEditorFn) 
(*DMAPIImportTaskConfigResponse, error) + + // DMAPIDeleteTaskConfig request + DMAPIDeleteTaskConfigWithResponse(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*DMAPIDeleteTaskConfigResponse, error) + + // DMAPIGetTaskConfig request + DMAPIGetTaskConfigWithResponse(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*DMAPIGetTaskConfigResponse, error) + + // DMAPUpdateTaskConfig request + DMAPUpdateTaskConfigWithResponse(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*DMAPUpdateTaskConfigResponse, error) + // DMAPIGetTaskList request DMAPIGetTaskListWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*DMAPIGetTaskListResponse, error) @@ -2092,7 +2441,142 @@ type DMAPICreateSourceResponse struct { } // Status returns HTTPResponse.Status -func (r DMAPICreateSourceResponse) Status() string { +func (r DMAPICreateSourceResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DMAPICreateSourceResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DMAPIDeleteSourceResponse struct { + Body []byte + HTTPResponse *http.Response + JSON204 *DeleteSourceResponse + JSON400 *ErrorWithMessage +} + +// Status returns HTTPResponse.Status +func (r DMAPIDeleteSourceResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DMAPIDeleteSourceResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DMAPIPauseRelayResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *ErrorWithMessage +} + +// Status returns HTTPResponse.Status +func (r DMAPIPauseRelayResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DMAPIPauseRelayResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DMAPIResumeRelayResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *ErrorWithMessage +} + +// Status returns HTTPResponse.Status +func (r DMAPIResumeRelayResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DMAPIResumeRelayResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DMAPIGetSourceSchemaListResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *SchemaNameList + JSON400 *ErrorWithMessage +} + +// Status returns HTTPResponse.Status +func (r DMAPIGetSourceSchemaListResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DMAPIGetSourceSchemaListResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DMAPIGetSourceTableListResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *TableNameList + JSON400 *ErrorWithMessage +} + +// Status returns HTTPResponse.Status +func (r DMAPIGetSourceTableListResponse) Status() string { + if r.HTTPResponse != nil { + return 
r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r DMAPIGetSourceTableListResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type DMAPIStartRelayResponse struct { + Body []byte + HTTPResponse *http.Response + JSON400 *ErrorWithMessage +} + +// Status returns HTTPResponse.Status +func (r DMAPIStartRelayResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2100,22 +2584,22 @@ func (r DMAPICreateSourceResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPICreateSourceResponse) StatusCode() int { +func (r DMAPIStartRelayResponse) StatusCode() int { if r.HTTPResponse != nil { return r.HTTPResponse.StatusCode } return 0 } -type DMAPIDeleteSourceResponse struct { +type DMAPIGetSourceStatusResponse struct { Body []byte HTTPResponse *http.Response - JSON204 *DeleteSourceResponse + JSON200 *GetSourceStatusResponse JSON400 *ErrorWithMessage } // Status returns HTTPResponse.Status -func (r DMAPIDeleteSourceResponse) Status() string { +func (r DMAPIGetSourceStatusResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2123,21 +2607,21 @@ func (r DMAPIDeleteSourceResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPIDeleteSourceResponse) StatusCode() int { +func (r DMAPIGetSourceStatusResponse) StatusCode() int { if r.HTTPResponse != nil { return r.HTTPResponse.StatusCode } return 0 } -type DMAPIPauseRelayResponse struct { +type DMAPIStopRelayResponse struct { Body []byte HTTPResponse *http.Response JSON400 *ErrorWithMessage } // Status returns HTTPResponse.Status -func (r DMAPIPauseRelayResponse) Status() string { +func (r DMAPIStopRelayResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2145,21 +2629,21 @@ func (r DMAPIPauseRelayResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPIPauseRelayResponse) StatusCode() int { +func (r DMAPIStopRelayResponse) StatusCode() int { if r.HTTPResponse != nil { return r.HTTPResponse.StatusCode } return 0 } -type DMAPIResumeRelayResponse struct { +type DMAPITransferSourceResponse struct { Body []byte HTTPResponse *http.Response JSON400 *ErrorWithMessage } // Status returns HTTPResponse.Status -func (r DMAPIResumeRelayResponse) Status() string { +func (r DMAPITransferSourceResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2167,22 +2651,22 @@ func (r DMAPIResumeRelayResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPIResumeRelayResponse) StatusCode() int { +func (r DMAPITransferSourceResponse) StatusCode() int { if r.HTTPResponse != nil { return r.HTTPResponse.StatusCode } return 0 } -type DMAPIGetSourceSchemaListResponse struct { +type DMAPIGetTaskConfigListResponse struct { Body []byte HTTPResponse *http.Response - JSON200 *SchemaNameList + JSON200 *GetTaskListResponse JSON400 *ErrorWithMessage } // Status returns HTTPResponse.Status -func (r DMAPIGetSourceSchemaListResponse) Status() string { +func (r DMAPIGetTaskConfigListResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2190,22 +2674,22 @@ func (r DMAPIGetSourceSchemaListResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPIGetSourceSchemaListResponse) StatusCode() int { +func (r DMAPIGetTaskConfigListResponse) StatusCode() 
int { if r.HTTPResponse != nil { return r.HTTPResponse.StatusCode } return 0 } -type DMAPIGetSourceTableListResponse struct { +type DMAPICreateTaskConfigResponse struct { Body []byte HTTPResponse *http.Response - JSON200 *TableNameList + JSON201 *Task JSON400 *ErrorWithMessage } // Status returns HTTPResponse.Status -func (r DMAPIGetSourceTableListResponse) Status() string { +func (r DMAPICreateTaskConfigResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2213,21 +2697,22 @@ func (r DMAPIGetSourceTableListResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPIGetSourceTableListResponse) StatusCode() int { +func (r DMAPICreateTaskConfigResponse) StatusCode() int { if r.HTTPResponse != nil { return r.HTTPResponse.StatusCode } return 0 } -type DMAPIStartRelayResponse struct { +type DMAPIImportTaskConfigResponse struct { Body []byte HTTPResponse *http.Response + JSON202 *TaskConfigResponse JSON400 *ErrorWithMessage } // Status returns HTTPResponse.Status -func (r DMAPIStartRelayResponse) Status() string { +func (r DMAPIImportTaskConfigResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2235,22 +2720,21 @@ func (r DMAPIStartRelayResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPIStartRelayResponse) StatusCode() int { +func (r DMAPIImportTaskConfigResponse) StatusCode() int { if r.HTTPResponse != nil { return r.HTTPResponse.StatusCode } return 0 } -type DMAPIGetSourceStatusResponse struct { +type DMAPIDeleteTaskConfigResponse struct { Body []byte HTTPResponse *http.Response - JSON200 *GetSourceStatusResponse JSON400 *ErrorWithMessage } // Status returns HTTPResponse.Status -func (r DMAPIGetSourceStatusResponse) Status() string { +func (r DMAPIDeleteTaskConfigResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2258,21 +2742,22 @@ func (r DMAPIGetSourceStatusResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPIGetSourceStatusResponse) StatusCode() int { +func (r DMAPIDeleteTaskConfigResponse) StatusCode() int { if r.HTTPResponse != nil { return r.HTTPResponse.StatusCode } return 0 } -type DMAPIStopRelayResponse struct { +type DMAPIGetTaskConfigResponse struct { Body []byte HTTPResponse *http.Response + JSON200 *Task JSON400 *ErrorWithMessage } // Status returns HTTPResponse.Status -func (r DMAPIStopRelayResponse) Status() string { +func (r DMAPIGetTaskConfigResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2280,21 +2765,22 @@ func (r DMAPIStopRelayResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPIStopRelayResponse) StatusCode() int { +func (r DMAPIGetTaskConfigResponse) StatusCode() int { if r.HTTPResponse != nil { return r.HTTPResponse.StatusCode } return 0 } -type DMAPITransferSourceResponse struct { +type DMAPUpdateTaskConfigResponse struct { Body []byte HTTPResponse *http.Response + JSON200 *Task JSON400 *ErrorWithMessage } // Status returns HTTPResponse.Status -func (r DMAPITransferSourceResponse) Status() string { +func (r DMAPUpdateTaskConfigResponse) Status() string { if r.HTTPResponse != nil { return r.HTTPResponse.Status } @@ -2302,7 +2788,7 @@ func (r DMAPITransferSourceResponse) Status() string { } // StatusCode returns HTTPResponse.StatusCode -func (r DMAPITransferSourceResponse) StatusCode() int { +func (r DMAPUpdateTaskConfigResponse) StatusCode() int { if r.HTTPResponse != nil { 
return r.HTTPResponse.StatusCode } @@ -2742,6 +3228,76 @@ func (c *ClientWithResponses) DMAPITransferSourceWithResponse(ctx context.Contex return ParseDMAPITransferSourceResponse(rsp) } +// DMAPIGetTaskConfigListWithResponse request returning *DMAPIGetTaskConfigListResponse +func (c *ClientWithResponses) DMAPIGetTaskConfigListWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*DMAPIGetTaskConfigListResponse, error) { + rsp, err := c.DMAPIGetTaskConfigList(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseDMAPIGetTaskConfigListResponse(rsp) +} + +// DMAPICreateTaskConfigWithBodyWithResponse request with arbitrary body returning *DMAPICreateTaskConfigResponse +func (c *ClientWithResponses) DMAPICreateTaskConfigWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*DMAPICreateTaskConfigResponse, error) { + rsp, err := c.DMAPICreateTaskConfigWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseDMAPICreateTaskConfigResponse(rsp) +} + +func (c *ClientWithResponses) DMAPICreateTaskConfigWithResponse(ctx context.Context, body DMAPICreateTaskConfigJSONRequestBody, reqEditors ...RequestEditorFn) (*DMAPICreateTaskConfigResponse, error) { + rsp, err := c.DMAPICreateTaskConfig(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseDMAPICreateTaskConfigResponse(rsp) +} + +// DMAPIImportTaskConfigWithBodyWithResponse request with arbitrary body returning *DMAPIImportTaskConfigResponse +func (c *ClientWithResponses) DMAPIImportTaskConfigWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*DMAPIImportTaskConfigResponse, error) { + rsp, err := c.DMAPIImportTaskConfigWithBody(ctx, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseDMAPIImportTaskConfigResponse(rsp) +} + +func (c *ClientWithResponses) DMAPIImportTaskConfigWithResponse(ctx context.Context, body DMAPIImportTaskConfigJSONRequestBody, reqEditors ...RequestEditorFn) (*DMAPIImportTaskConfigResponse, error) { + rsp, err := c.DMAPIImportTaskConfig(ctx, body, reqEditors...) + if err != nil { + return nil, err + } + return ParseDMAPIImportTaskConfigResponse(rsp) +} + +// DMAPIDeleteTaskConfigWithResponse request returning *DMAPIDeleteTaskConfigResponse +func (c *ClientWithResponses) DMAPIDeleteTaskConfigWithResponse(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*DMAPIDeleteTaskConfigResponse, error) { + rsp, err := c.DMAPIDeleteTaskConfig(ctx, taskName, reqEditors...) + if err != nil { + return nil, err + } + return ParseDMAPIDeleteTaskConfigResponse(rsp) +} + +// DMAPIGetTaskConfigWithResponse request returning *DMAPIGetTaskConfigResponse +func (c *ClientWithResponses) DMAPIGetTaskConfigWithResponse(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*DMAPIGetTaskConfigResponse, error) { + rsp, err := c.DMAPIGetTaskConfig(ctx, taskName, reqEditors...) + if err != nil { + return nil, err + } + return ParseDMAPIGetTaskConfigResponse(rsp) +} + +// DMAPUpdateTaskConfigWithResponse request returning *DMAPUpdateTaskConfigResponse +func (c *ClientWithResponses) DMAPUpdateTaskConfigWithResponse(ctx context.Context, taskName string, reqEditors ...RequestEditorFn) (*DMAPUpdateTaskConfigResponse, error) { + rsp, err := c.DMAPUpdateTaskConfig(ctx, taskName, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseDMAPUpdateTaskConfigResponse(rsp) +} + // DMAPIGetTaskListWithResponse request returning *DMAPIGetTaskListResponse func (c *ClientWithResponses) DMAPIGetTaskListWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*DMAPIGetTaskListResponse, error) { rsp, err := c.DMAPIGetTaskList(ctx, reqEditors...) @@ -3336,6 +3892,196 @@ func ParseDMAPITransferSourceResponse(rsp *http.Response) (*DMAPITransferSourceR return response, nil } +// ParseDMAPIGetTaskConfigListResponse parses an HTTP response from a DMAPIGetTaskConfigListWithResponse call +func ParseDMAPIGetTaskConfigListResponse(rsp *http.Response) (*DMAPIGetTaskConfigListResponse, error) { + bodyBytes, err := ioutil.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DMAPIGetTaskConfigListResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest GetTaskListResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorWithMessage + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + } + + return response, nil +} + +// ParseDMAPICreateTaskConfigResponse parses an HTTP response from a DMAPICreateTaskConfigWithResponse call +func ParseDMAPICreateTaskConfigResponse(rsp *http.Response) (*DMAPICreateTaskConfigResponse, error) { + bodyBytes, err := ioutil.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DMAPICreateTaskConfigResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 201: + var dest Task + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON201 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorWithMessage + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + } + + return response, nil +} + +// ParseDMAPIImportTaskConfigResponse parses an HTTP response from a DMAPIImportTaskConfigWithResponse call +func ParseDMAPIImportTaskConfigResponse(rsp *http.Response) (*DMAPIImportTaskConfigResponse, error) { + bodyBytes, err := ioutil.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DMAPIImportTaskConfigResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 202: + var dest TaskConfigResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON202 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorWithMessage + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + } + + return response, nil +} + +// ParseDMAPIDeleteTaskConfigResponse parses an HTTP response from a DMAPIDeleteTaskConfigWithResponse call +func ParseDMAPIDeleteTaskConfigResponse(rsp *http.Response) (*DMAPIDeleteTaskConfigResponse, error) { + bodyBytes, err := ioutil.ReadAll(rsp.Body) + 
defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DMAPIDeleteTaskConfigResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorWithMessage + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + } + + return response, nil +} + +// ParseDMAPIGetTaskConfigResponse parses an HTTP response from a DMAPIGetTaskConfigWithResponse call +func ParseDMAPIGetTaskConfigResponse(rsp *http.Response) (*DMAPIGetTaskConfigResponse, error) { + bodyBytes, err := ioutil.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DMAPIGetTaskConfigResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest Task + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorWithMessage + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + } + + return response, nil +} + +// ParseDMAPUpdateTaskConfigResponse parses an HTTP response from a DMAPUpdateTaskConfigWithResponse call +func ParseDMAPUpdateTaskConfigResponse(rsp *http.Response) (*DMAPUpdateTaskConfigResponse, error) { + bodyBytes, err := ioutil.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &DMAPUpdateTaskConfigResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest Task + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest ErrorWithMessage + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + } + + return response, nil +} + // ParseDMAPIGetTaskListResponse parses an HTTP response from a DMAPIGetTaskListWithResponse call func ParseDMAPIGetTaskListResponse(rsp *http.Response) (*DMAPIGetTaskListResponse, error) { bodyBytes, err := ioutil.ReadAll(rsp.Body) diff --git a/dm/openapi/gen.server.go b/dm/openapi/gen.server.go index e3bec85ff71..518da77bf7c 100644 --- a/dm/openapi/gen.server.go +++ b/dm/openapi/gen.server.go @@ -71,6 +71,24 @@ type ServerInterface interface { // transfer source to a free worker // (POST /api/v1/sources/{source-name}/transfer) DMAPITransferSource(c *gin.Context, sourceName string) + // get task config list + // (GET /api/v1/task/configs) + DMAPIGetTaskConfigList(c *gin.Context) + // create task config + // (POST /api/v1/task/configs) + DMAPICreateTaskConfig(c *gin.Context) + // import task config + // (POST /api/v1/task/configs/import) + DMAPIImportTaskConfig(c *gin.Context) + // delete task_config + // (DELETE /api/v1/task/configs/{task-name}) + DMAPIDeleteTaskConfig(c *gin.Context, taskName string) + // get task_config + // (GET /api/v1/task/configs/{task-name}) + DMAPIGetTaskConfig(c *gin.Context, taskName string) + // update task_config + // (PUT /api/v1/task/configs/{task-name}) + DMAPUpdateTaskConfig(c *gin.Context, taskName string) // get task list // (GET 
/api/v1/tasks) DMAPIGetTaskList(c *gin.Context) @@ -425,6 +443,93 @@ func (siw *ServerInterfaceWrapper) DMAPITransferSource(c *gin.Context) { siw.Handler.DMAPITransferSource(c, sourceName) } +// DMAPIGetTaskConfigList operation middleware +func (siw *ServerInterfaceWrapper) DMAPIGetTaskConfigList(c *gin.Context) { + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + } + + siw.Handler.DMAPIGetTaskConfigList(c) +} + +// DMAPICreateTaskConfig operation middleware +func (siw *ServerInterfaceWrapper) DMAPICreateTaskConfig(c *gin.Context) { + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + } + + siw.Handler.DMAPICreateTaskConfig(c) +} + +// DMAPIImportTaskConfig operation middleware +func (siw *ServerInterfaceWrapper) DMAPIImportTaskConfig(c *gin.Context) { + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + } + + siw.Handler.DMAPIImportTaskConfig(c) +} + +// DMAPIDeleteTaskConfig operation middleware +func (siw *ServerInterfaceWrapper) DMAPIDeleteTaskConfig(c *gin.Context) { + var err error + + // ------------- Path parameter "task-name" ------------- + var taskName string + + err = runtime.BindStyledParameter("simple", false, "task-name", c.Param("task-name"), &taskName) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"msg": fmt.Sprintf("Invalid format for parameter task-name: %s", err)}) + return + } + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + } + + siw.Handler.DMAPIDeleteTaskConfig(c, taskName) +} + +// DMAPIGetTaskConfig operation middleware +func (siw *ServerInterfaceWrapper) DMAPIGetTaskConfig(c *gin.Context) { + var err error + + // ------------- Path parameter "task-name" ------------- + var taskName string + + err = runtime.BindStyledParameter("simple", false, "task-name", c.Param("task-name"), &taskName) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"msg": fmt.Sprintf("Invalid format for parameter task-name: %s", err)}) + return + } + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + } + + siw.Handler.DMAPIGetTaskConfig(c, taskName) +} + +// DMAPUpdateTaskConfig operation middleware +func (siw *ServerInterfaceWrapper) DMAPUpdateTaskConfig(c *gin.Context) { + var err error + + // ------------- Path parameter "task-name" ------------- + var taskName string + + err = runtime.BindStyledParameter("simple", false, "task-name", c.Param("task-name"), &taskName) + if err != nil { + c.JSON(http.StatusBadRequest, gin.H{"msg": fmt.Sprintf("Invalid format for parameter task-name: %s", err)}) + return + } + + for _, middleware := range siw.HandlerMiddlewares { + middleware(c) + } + + siw.Handler.DMAPUpdateTaskConfig(c, taskName) +} + // DMAPIGetTaskList operation middleware func (siw *ServerInterfaceWrapper) DMAPIGetTaskList(c *gin.Context) { for _, middleware := range siw.HandlerMiddlewares { @@ -809,6 +914,18 @@ func RegisterHandlersWithOptions(router *gin.Engine, si ServerInterface, options router.POST(options.BaseURL+"/api/v1/sources/:source-name/transfer", wrapper.DMAPITransferSource) + router.GET(options.BaseURL+"/api/v1/task/configs", wrapper.DMAPIGetTaskConfigList) + + router.POST(options.BaseURL+"/api/v1/task/configs", wrapper.DMAPICreateTaskConfig) + + router.POST(options.BaseURL+"/api/v1/task/configs/import", wrapper.DMAPIImportTaskConfig) + + router.DELETE(options.BaseURL+"/api/v1/task/configs/:task-name", wrapper.DMAPIDeleteTaskConfig) + + router.GET(options.BaseURL+"/api/v1/task/configs/:task-name", wrapper.DMAPIGetTaskConfig) + + 
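// For illustration only (not part of this patch): the /api/v1/task/configs/:task-name
// registrations in this block share one path parameter, which each wrapper above binds
// with runtime.BindStyledParameter before invoking the handler, e.g.
//
//	GET /api/v1/task/configs/task-1  ->  siw.Handler.DMAPIGetTaskConfig(c, "task-1")
//
// ("task-1" is the example task name used in the OpenAPI spec below.)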
router.PUT(options.BaseURL+"/api/v1/task/configs/:task-name", wrapper.DMAPUpdateTaskConfig) + router.GET(options.BaseURL+"/api/v1/tasks", wrapper.DMAPIGetTaskList) router.POST(options.BaseURL+"/api/v1/tasks", wrapper.DMAPIStartTask) @@ -837,86 +954,89 @@ func RegisterHandlersWithOptions(router *gin.Engine, si ServerInterface, options // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+xd63PbOJL/V3C6+zAzJVmS7TiJr/aDY3uyvrOdlK2pua2pnAKBkIQ1CNAAaI825f99", - "Cw+SIAlSlB8Ze8b7Ycch8Wh0/7rRD4D61kM8TjjDTMne/reeREscQ/PnIU2lwuIM6v/XDxLBEywUweY1", - "jCLzNMISCZIowllv3zzFUgI+B2qJAUqFwEyB2AwCGI9wr9/Dv8M4obi33xtvv90abY22xvvvtvfGvX5P", - "rRL9XCpB2KJ31+9BSm5wfR7OKGEYSAVV6mYj0k3jz6BEivNRZ5xTDJkelmIY4QD9RPojmTW4ph0GZTA2", - "pBbrs8MEFnbX7wl8nRKBo97+b7Zntticur5l8pe8N5/9EyOlp3LC+ZWLqz9QODOesmgqeSoQnmarL89p", - "mgDbBOgmubBuDe31aeOVvKaDUduECi6ap9Iv105i2oZmqMvQDtFdhpr1ZUpDjAoKVWCo8ATKqwt8nWKp", - "6oIVOOY3eBpjBS0D5jClqrc/h1TifoUht0uslhrFHNh+QPcDEVRwBiUGhIGI3zKpBIZx/rgXgrZH+pQS", - "S9l/CTzv7ff+c1iYkKGzH8NL0/4cxvhUt77r9xSUV+t66aXX+Oov2Q0TYt4RplhhO+8FlglnEtf5p7t3", - "X4Wmp1jDXWDWYyG4+JWo5RmWMohKPTvUfwOs2/b6FYrM0ynSAK31Ne8AsuB1cxOm8AILPbntGstFU8/Y", - "EbUOusVAfZ+eEJs/YlXaGDRrmtmtMaX/SxSO5TpulzecgttQCLgy/+YKUiPFCisqy7Ht+nb29kVYA/r4", - "i3CG+YkXYdH+iNTbAb8P2Zdm735Uwu2QT02+tgqPyHNr9J6e5Mfldzorxvwe1E/gjOJLJVKkUtFi4C2B", - "U2S20qm8puXN/PDi+GByDCYHH06PwVc1/gp++Eqir4Aw9cN4/CM4/zQB57+cnoKDXyafpifnhxfHZ8fn", - "k/7ni5Ozg4t/gP89/oft8SMY/jT5j9+QVXccTQmL8O9fwOHpL5eT44vjI/DT8EdwfP7x5Pz4byeM8aMP", - "4Oj454NfTifg8O8HF5fHk7+lav4unu2Cw0+npweT4+zf0xlhIdfELa3uoUSzoLOkNMsCzc3z9f6M1z0b", - "y+NqSFSnHEYOEbUNqfDUKYcRSBlRta1wThiRSxxNZyvlnnARQ2Xhs7cb3AS1X6A5RvnCg1vBBe/9dKFI", - "FGyUCL7QrnHwpYHoBjRV+FhZVXk8b+ryUgKEh1j+yXgXOKQhuQdZCQKQcUYUB9YzwcDIFgjXoSYUmspl", - "yde04U951F8FUViasMLCVE9ggowlRlcJJ0wBqZ9ABY7OAILM4oAoAOc6+hBYKigUYQvTzbh5QUf0mk4R", - "ZwqzwNrkNQUrnoJbyJS3wpLnH7AA4CsaFyYg01JtBvrgK9pufrUTfvUAvf/voOKvGKov9pckghnPeaJI", - "TKQiCMglFJFmo8aPtqrglqiljYacaDijK5BKHIHbJWYAOtcUcIRSIXVY0DTm0dEpiEvuaC6aCup9OYWA", - "+zkVIW9ZYApXgPIFQHrYNAEJpwStAOJsThapdaXrTvTvCRFuG8tgOqpi1DSyrrgiNhDNp+v163rNUkq1", - "alQCfs/26D/Fjd3n8nl39ka1qSdLHW3ZxhqYCRaERwRBSldWRQCxQXnBACKBXVbUB25wcANpiveBmULL", - "SWLEWSTvR73AMSRsKhOIcGkF4zdV+s8II3Eag7nAGEREXgHTy9Dw8cN9pg8FUxd67YdG0HVkGGNi3uWC", - "q8OAmW3KvixMuWc+zKvqdlDbqawd+jg5OcpyJWniouTcPBcWBb+H4zna3h5gNHo3GI/x+8FsG6LBaHt3", - "G6LxeDQa7eyPB2/f7b5vZkyh7SUSw0mVnMQ5obhIqrSTafMqM8K2Rvp/291piYgo4aO3NbQv7BR1OUVE", - "YKS4WGkDI3Ad2FJxbSfWUtCIkvVuhq/aZZTYfJfnM5SHqPDQ8BgQZhFujU/B1B8qXB33wfj92/c/hsx4", - "ad4G8IUw9wCwtYMrTIJlXJYc1AQ9PgEIKrScpsk0zrPLjakr0xakid3Hcul4flOTmue4vT8+i3VvDWU6", - "M0OGduhwRjJjokVlabiLlDHdeZ0XXgZrEET+ckMSbmJ6RnZoe740nkKe+6rrmfUkjO0xmbR+ETeuD0wq", - "seIlRqkgalWfxvgvLnksJS17AX0jtznBNAK3hFIww2BJoggz69cssMr9SX+g0iBgLnhsmpj9ea73wrpZ", - "KhsQhIWaQkr5LY6miNXJPuRxzBk4d5b58vIU6D5kThC0Xn/OrLXMkZJOEWz2eb2BranKWvpoC2JWD6xX", - "0jj0z95weh2fj8+ANYPD/3szeu/+ri5t/axXeNU86WExn5ZKIsiNXtoVXmWWGHiTr5mv6pSWeRngQZ3A", - "oHY4f/ij4GkSyKRENE8vdxf0nAipppQju8uEuuhAAEebDaugWGAVbJqyzQesJQnM6P1izbWF5GR7EwaZ", - "anOPdVNjnzf4esUe1rESkkpsPTvthqfaahhTKa0hCO25bsT6LrPkQdNorYwrtoUraiHNSKCUt1xEjSPm", - "DcpD7uy+2QuOx0UzdealN87OzmgvFCMkWZjWlgy0sVyxuecefFsn39nX0Pb2gNbEY9auXJRqXKjLX3Ur", - "L9p9uq67D8k8pzLk3zjq9MsahYJztd6UeWt3SHQid1N6gOqXlKVZ91q2e69+27zd21aDbnu+z7am+XK/", - "KVQyW1/3sm6A5DFWS+0I3Aoe8rgy3MqcmLW4LcT9AAwKnFCCYAMWbdm5YeDJEmelbedj0hWw9W+Xd8ut", - "ZrWQPRhviC2fkCB2dDBquNIh52giVwCdt92Uc3yN0l9UlF7CSKfCui2PlkrrPgBrw4Vxx5PusOPJWtT9", - 
"IYso1dFqniTlMOpol7zii3eMpSJ1KK8ya1Tf/LpbsnvEv4sc/S617+13aTgSto5jx+Vfrhgqlm/KS+Hl", - "61fAzOTTYFLs/ZCLLLDk9AZHU+PhcnQ1baghtRrs7BxSkH/hg0TNVjjjt1tnEFcFO1pyZHrVIE0DpThn", - "2Oy4gcXONCcIW2iuhKbwCwa3S4KWeUKJSJB13igOrmXtOubXAtYSYaamKulaYXRJ9ukMLwmLvJRVl755", - "gBUoZel3rSsqtWhekS0o4pvsEGcHumyX7jzw9GChg942mdsGFbFDgUHKBtkovuhb1boUaa+NRn1G+Iss", - "Sb3fLalWFk9QGFU9CPHJC399pWqCVUiZTWX3obm4pqp/XdMm7qRe3Xg2mYk5oZp/IqXYnT4luhekn0ut", - "152C+UDYKV/8bAa70GOF0v+YLSFDeGpPAE+z8x5LyBZ4bZnaywPYkAjINNFRE5hze9rXHSyOIgoSmi4I", - "63Lw15TqLSVlFyyKB+7cYiWvWT92aSjQHldWu22sORSDNp5ebd72fUDIq3Csxtk0Sk1sogKjLfmt5t8S", - "ssimB+eUIIUjsxITa6axVkZ+g8WtILb8bo4+fglt8VrBp3Hw+KOWxy1cmSIA59oOQIX1luLNkmApXZm6", - "1+8VNevwZHZL7ZagMH6Z6VBkKewxmJgsBFQ4x3uV2xpXrg0wbfrdT4IZXT+znSs6UEnpbbCMielwBBX8", - "ACXOTuM2cD2j3NX5M0bPU0r1QhgSOMbMntqC1JwEKkAFTaNO/k1BwhqlrgCyuv6gVKqyDpvVgMkJJcEV", - "NkqpB5YAqqwwSPENpjWTSBaMC2w3oUB+Qj/O3M8cFC1tSqwFUUy7WHBHgzv9Vj8jk0ClsDCRkTXdzcQ0", - "NS/o+v8jwZP1VN01SODnlFKHd61ngSCiVK7hc6CRmOuXRlE9Q4Q4k0QqzFCgqGTMCVOCU5BZGMKcu2Lq", - "RPbkBRfaqM3NAex8NAClTIXGalk2qeIhFujhwmVIbel1UBQRUTfNW8Ns/qkzqrWRbYOpWgoMo/LBl93q", - "bmMYZjto/iHOnFcWdPVI3DjyeC84tO2xdugmBJwwJDZDgGeEGgAgcEKnM6hQ+ejauH40xx9Le2pLwRn5", - "Vz6VGQPg3zFKzSOtD9cpZIqYqcLnahLakX3Vhdybh83eYb75t/qGTa5AyDcsNsV6wqISqhRTjHbmaLS9", - "tzPYfofeDsZj/HYA997sDPbQaPZuN3rzfr4z2h8P3o52x7vbO/3Rm923u9EO8pq/23mzPdge7USz7d29", - "KNqJ9seD8dtR8JpQOS3nXfsxL9xRjZaeCS9zaDcY2z1N6rclGdu0i5XclAZSBgJTqC1a+0k6rdD5Voqc", - "jNf5F1Ubfmf9hI3HqVqCssvWyOTqijo7Wx6S14WWPh1NYqj5bs1ni6yTqLh/Act3GWXHUKtijc1LM0CG", - "vIC269fdtF22lmQ7IsqPixrC1j64JTRCUERZPFYOeGaDnx6YsawVqZoymcrmucNOfQdaVZDW1gKLY1A2", - "dwhdRRG/KY58TGFEHEvAuMqD42zFsiKW8T052HECNetgHtcxL8j6FhUuRUotDC8C93aOv8RDApudEbhP", - "6f6JquLtdfCQ0CtFnLbEfYsD1VxarRvVYsbGwpWrUEmQKbXirtwr26pW68oO9ygFtxd/70wgomNDSI84", - "CoSPR2fgU4LZwecTcPTpUMtE0N5+b6lUIveHw4gjuZUQtkAw2UI8Hv5rOVQkmg20cg3shkg4G0qr3cav", - "mHOTBCfKrKQ2wQ0W0s79Zmtna2TyWQlmMCG9/d6O1iwDCbU01A5hQoY346G7xzW0WVjzyhnc/P7uSWSm", - "O/h8EroDa1LM9lKa6b09GrlANDvmBhObwdDr+ae0J7wKc9ymOK13bo0QKmqUIqStyl2/t/uIZNTuOgem", - "nkNCcWRgJNM4hmLV29ecBI7B/scGMn1ScCE11lyT3hfdu0Eww2/2D7N/31m8UWzTlAFJfZrPKWHYsu3c", - "5poSKGCMrZR/qyW/PPIyD0o/14DpZfnVnkdDz9cXmx8uuNnlQxBfasDZDRjGZyZRbvla+XREJ0Fmdqyj", - "hhUXtL+PhgUuhL8wDfM+ebGRhjnBDL+5zWEjDXObWgcN88lr1jCPhr+2hpU/YNIqyCjeyogLatZHrI44", - "+p/LT+cNqlQmS4+VHyavwy3iCJjpCqoijioUOZ+ghZy/T85OO5GjG64hZ6lseryJHOterjc9xWcV1oFZ", - "z+ycVnM7JT+waCB9nWKx8jBN1HKatwhgOFxeDOD3UQ1f4CMSAZD6FyhodtypIoJqk0IUWdRlIg7ZxHr7", - "/ZvL7OSi84I/8Gj1aOvNPnBRX6CbDcz0dHc1lo+/AwnPzQbZ6/6A4VtftiGx1pVs+M1LtKzfRvyv96xV", - "Ospn5uJsysh1Wr7b07yjlPM+nXaUxpPid/1a5o3b88o8sXl8SKU7dZidqjRhnKtVhKyDGeGBdmH30TAT", - "/JrSC4CsBRmADwXsMIGptBlOY3xarNZn3fIiu4/8zIH7pctW+9yEamThHU2ep8ye7M0O7TxU2ALLNO4m", - "7QvT9FXcTyhuK42nlLf3lcsOjqC9C9vFHXwC4TbfqXlSv7By//eFhMDZtSFbzGjyQbvCY/jN/lG4MB3A", - "YmqAzw8r/ZaCT8P0xdo7Th+sBz0pSssHY18WSG097P4YVVCoTjtWcT/rpWxYTxD21e6o3ZUrHZrYu5e4", - "WbpjzE+5WebXSLrslfmNzecDtNbDNt8luVL59OILMVT+N6L9D20/BqR40tF2uTt+f2XTVbnm+GexXBGR", - "T226lIBMzt0H0ptRNnHNXkz66YmgVj+Z8GfBWgaE3PniANqP2dn6yhp02bzduh0w+y7wExcqa58fDjDB", - "5CCp+/L689lQcqoKdtvvubfXBYz3NrH3z54C9/Xv7v+R9QH3EfyXUh2A9jcXhMq/2VqWbFWNht/MafNN", - "ygJO9BtZZf/OW8Ac5zR0NMZNJ+QDgXXgUymBdH/tBxX8eR/0bZWXWe/OEvcGTa6KshmYbMq+S7L+OePp", - "y1PWPf1Uyd3LrQTcAxs2p9wpt/+KjpeKDlc4uAc8HlgmyAsEH1YaPQcsul8o8Rw2rdfCxR/lGbdWLx6M", - "4g2rGXkd4xXSr/WVF6tLwSLLI6uS7jejeMOIxv+tjledemY61W++vNbE8gwBnXne8Bs8Lzl4q2ue9CBe", - "y/is339eNeRVQ753xa7lx8Ze7AbYqoZJ2qSG+Y9LvarixpP/VRTx8ZMRa3/S7M9SlCp+f20DfW33Wrsd", - 
"1fC+OPtXyqpvlAH7DrvMCz0VYtCaoaeKTvM5AHGToal823vF062Ix5Awc9e7p5nsBmj80Yf26+URRw+8", - "Uz68Tgm6GtjjdLakNZD5r+CWYNULGVv3LePvQqQjL387UO6nYz31CxCZXRbM22UP7r7c/TsAAP//p/gb", - "xQCAAAA=", + "H4sIAAAAAAAC/+w961PjOJ7/ii53H2amEpIATXdztR9oYHq5A7oLMjW3NdWXVmwl0WJLRpJhsl3871t6", + "2JZtyXZ4NZlmP+zQtiz99Hu/pHzrBTROKEFE8N7+tx4PliiG6s/DKOUCsTMo/18+SBhNEBMYqdcwDNXT", + "EPGA4URgSnr76iniHNA5EEsEgpQxRASI1SSA0BD1+j30J4yTCPX2e+Ptt1ujrdHWeP/d9t641++JVSKf", + "c8EwWfTu+j0Y4RtUX4eSCBMEuIAiNathbpaxVxAsRfmsM0ojBImcNkIwRA74MbdnUnswQztMSmCsQC32", + "p6dxbOyu32PoOsUMhb39P/SX2WZz6PoayV/yr+nsnygQcilDnN8pu/qOxJnRlIRTTlMWoGm2+/KaagjQ", + "Q4AckhPrVsFeXzZe8etoMGpaUMCFfyn5snURNda1Qp2GeoruNJSoL0PqQpSTqAxBgSaQX12g6xRxUScs", + "QzG9QdMYCagRMIdpJHr7cxhx1K8g5HaJxFJyMQX6OyC/AyEUcAY5ApiAkN4SLhiCcf6452JtC/RphDVk", + "/8XQvLff+89hoUKGRn8ML9X4cxijUzn6rt8TkF+1fSW3XsOrvWUzjQt5RyhCAul1LxBPKOGojj/5efdd", + "SHiKPdw5Vj1mjLLfsVieIc6dXClXh/JvgOTYXr8CkXo6DSSD1r5V70CgmdesjYlAC8Tk4vrTmC98X8YG", + "qDbWLSbq2/C40PwRiZJhkKjxo1vylPwvFijmbdguG5wC25AxuFL/pgJGiooVVFS2o8f19erNm9AK9PE3", + "YRTzE29Cc/sjQq8nfB6wL5XtflTA9ZRPDb7UCo+Ic630nh7kx8V3OivmfA7oJ3AWoUvB0kCkrEHBawCn", + "gTKlU34dlY354cXxweQYTA4+nB6Dr2L8Ffz0FYdfASbip/H4Z3D+aQLOfzs9BQe/TT5NT84PL47Pjs8n", + "/c8XJ2cHF/8A/3v8D/3Fz2D4y+Q//gi0uKNwikmI/vwCDk9/u5wcXxwfgV+GP4Pj848n58d/OyGEHn0A", + "R8e/Hvx2OgGHfz+4uDye/C0V83fxbBccfjo9PZgcZ/+ezjBxuSZma3UPJZw5nSUhUeYYrp63+zPW59lc", + "FlZdpDqlMDQcUTNIhaceURiClGBRM4VzTDBfonA6WwnzhLIYCs0+e7tOIyj9AomxiC4sdiuwYL2fLgQO", + "nYMSRhfSNXa+VCy6BkwVPFZ2VZ7PWrq8FQfgLpR/Ut4FcklI7kFWgoBAOSOCAu2ZIKBoC5j5oEaUKOXL", + "kq+pw5/yrL8zLBBXYYVmU7mACjKWKLhKKCYCcPkECnB0BgJINB9gAeBcRh8McQGZwGShPlNuntMRvY6m", + "ASUCEcfe+HUEVjQFt5AIa4clz9+hAcDXYFyogExKpRrog6/Btv/VjvvVA+T+v52CvyJBfbO/JSHMcE4T", + "gWPMBQ4AX0IWSjRK/pFaFdxisdTRkCENJdEKpByF4HaJCIDGNQU0CFLGZVjgm/Po6BTEJXc0J02F6206", + "uRj3c8pc3jJDEVyBiC5AIKdNE5DQCAcrEFAyx4tUu9J1J/rPBDNjxjI2HVV5VA3SrrjAOhDNl+v163JN", + "0iiSolEJ+C3dI/9kN9rO5evu7I1qS0+WMtrSgyVjJohhGuIARtFKiwjAOigvEIA50NsK+8BMDm5glKJ9", + "oJaQdOIooCTk94OeoRhiMuUJDFBpB+M3VfjPMMFxGoM5QwiEmF8B9ZWC4eOH+yzvCqYu5N4PFaHrnKGU", + "iXqXE67OBkSZKf2yUOWW+lCvquagZqm0Hvo4OTnKciVpYqLkXD0XGgW9h+N5sL09QMHo3WA8Ru8Hs20Y", + "DEbbu9swGI9Ho9HO/njw9t3uez9iCmkvgehOquQgznGEiqRKM5g6rzLDZGsk/7fdHZYQsxJ/9LaG+oVe", + "ok6nEDMUCMpWUsEwVGdsLqjUE60QeLmk3c2wRbvMJTrfZfkM5SkqOFQ4BphoDtfKp0DqTxWsjvtg/P7t", + "+59dary0rof5XDz3AGZrZi43CBpxWXJQAvT4AARQBMtpmkzjPLvsTV2psSBNtB3LqWP5TT4xz/n2/vxZ", + "7HtryNOZmtJlod0ZyQyJmitL012khMiP27zwMrM6mcjerovCPqRnYLvM86XyFPLcV13OtCehdI/KpPWL", + "uLE9MKnEipcoSBkWq/oyyn8xyWPOo7IX0Fd0m2MUheAWRxGYIbDEYYiI9msWSOT+pD1RaRIwZzRWQ5R9", + "nktbWFdLZQUSICamMIroLQqnAamDfUjjmBJwbjTz5eUpkN/gOQ6g9vpzZLUih/NoGkC/z2tNrFVVNtLm", + "NifPyonlTrxT/2pNJ/fx+fgMaDU4/L83o/fm7+rW2le9Qiv/oofFepIqCcM3cmtXaJVpYmAt3rJe1Skt", + "49KBgzqATukw/vBHRtPEkUkJozy93J3Qc8y4mEY00FbG9YkMBFC43rQCsgUSzqEpWX/CWpJAzd4v9lzb", + "SA62taATqTr3WFc1+rnH1ytsWMdKSMqR9uykG55KraFUJdeKwGVzzYx1K7OkTtWotYwptrkrai7JSCDn", + "t5SF3hnzAeUpd3bf7Dnno8wPnXppzbOzM9pzxQhJFqY1JQN1LFcY99yDb/rIdvYla1s2oDHxmI0rF6W8", + "GzX5q27lRW2n67L7kMxzyl3+jYFOvqxByCgV7arM2rvhRENys6TFUP2SsPhlr8HcW/Vbv7nXowbdbL6N", + "Nt96ud/kKpm11720G8BpjMRSOgK3jLo8roxveQ5MK98W5H4ADzKURDiAHl7UZWfPxJMlykrbxseMVkDX", + "v03eLdea1UL2YLwmb9mAOHlHBqMKKx1yjipyBdB4276c42uUvlFReolHOhXWdXm0VFq3GbA2nZvvaNKd", + "7WjSynXfZROlOlrNk4woDDvqJav4YrWxVKgO+VWmjerGr7smu0f8u8i536T2LXuXuiNh7Th23P7ligTF", + "9lV5yb19+QqolWwYVIq973KRGeI0ukHhVHm4NLiaempIjQo760Ny4s/dSOTXwhm+zT6dfFWgoyFHJncN", + "0tRRijOKTc/r2OxMYgKThcSKawm7YHC7xMEyTyhhDrKP14qDa1m7jvk1h7YMEBFTkXStMJok+3SGlpiE", + 
"Vsqqy7d5gOUoZcl3jTsqjfDvSBcU0U3WxNkBLv1JdxxYcrCQQW8TzfWACtkhQyAlg2wWm/SNYl2KtFuj", + "URsR9iZLVO93S6qVyeMkRlUOXHiywl9bqHxs5RJmVdl9aC7OV/WvS9rEdOrVladPTcxxJPHH0giZ7lMs", + "v4LR59Loti6YD5ic0sWvarILOZcr/Y/IEpIATXUH8DTr91hCskCtZWorD6BDIsDTREZNYE51t69pLA7D", + "CCRRusCkS+OvKtVrSMouWBgPTN9iJa9Zb7tUEEiPK6vdemsOxaTe7lW/2bcZgl+5YzVKpmGqYhPhmG1J", + "byX+lpCEOj04j3AgUKh2omLNNJbCSG8Qu2VYl99V6+MXl4mXAj6Nne2Pkh63cKWKAJRKPQAFkibFWiVB", + "nJsyda/fK2rW7sW0Se2WoFB+mfqgyFLoNpgYLxgUKOf3KrYlX5kxQI3pd+8EU7J+pj+uyEAlpbfGNibq", + "gyMo4AfIUdaN68F6Brmp82eInqdRJDdCAoZiRHTXFoxUJ1DBVFAN6uTfFCC0CHWFIav7d1KlSmu3WnWo", + "HFcSXCAllHJiDqDICoMRukFRTSXiBaEMaSPkyE/Ix5n7mTNFw5gSakEYR100uIHBdL/Ve2QSKARiKjLS", + "qtsPjG94Adf/HzGatEN156GAFi5vw3uhRdZJ8uZfqWgj6xsQKE4iw0LN3SvFql9awPb1Ic4hjlA4VUxe", + "yyc2pLMcXXymY93xtpaMz4baveGuHdSKTGkQIM494K5XEqjP1a9jw4fWX9MoMtpPYtgRUpaKd3QOpF7K", + "ta1coJ4vDCjhmAtEAkeJURkXIhiNQGZvMDHOq6oa6j4cyqSJm6t2/Hw2ADlPmdRcZUlNBXUJhJzOXZSW", + "dl+GyCFmdUO9NczWnxoTW5tZD5iKJUMwLLdB7VbFRCFMfyDxF1BifHSn449j78zjPefU+ovWqX364IQE", + "bD0OsEyShwEYSqLpDIqg3Mg4rjdq2XNJv33JKMH/ypdScwD0JwpS9Uhqx+sUEoHVUu4uqyTqiL7qRu6N", + "Q3+skLuCjZGCzzF0RQqFi1TXbJXAtVhitDMPRtt7O4Ptd8HbwXiM3g7g3pudwV4wmr3bDd+8n++M9seD", + "t6Pd8e72Tn/0ZvftbrgTWMPf7bzZHmyPdsLZ9u5eGO6E++PB+O3IeWisnKS1DoGpF6Zxp+HLhJYxtOuM", + "9J+mENCQmvfp0JLT6gFlwJC0hGFLX6UU6NyxCgyN27zNqg6/017j2vNUNUHZgfciubqjzq63xcmtNs6C", + "w0eGmifv7zTTIYOg9nE8O4DgHQPvijZWL9UEGec5pF2+7ibtvLFA35Gj7CjZk8Tog1schQFkYRadl8Pf", + "2eCXB+avayVLX15b6KqHO8TrAKtwwtpYbjMIytZ2em95S4cvq/CYxAgp4oBQkadKsh3zClnG98RgxwXE", + "rIN6bEOeE/UNIlyKmxsQXqRxmjG+iS0j63WM3KeR44l6JJq7IlxEr5T0mso4DQ6Uv9BeV6rFit4ypqlX", + "cpAJtaCm+M+baphtRah7NAY0twLcqUBESGpFRzRwJBOOzsCnBJGDzyfg6NOhpAmLevu9pRAJ3x8OQxrw", + "rQSTRQCTrYDGw38thwKHs4EUroE2iJiSIdfSrfyKOVURKhZqJ7UFbhDjeu03WztbI5XdTBCBCe7t93ak", + "ZCmWEEsF7RAmeHgzHppTfUOdk1evjMLNT3OfhGq5g88nrhPRquCgUwPq6+3RyASiWdMjTHQ+S+7nn1z3", + "+xXquElwGk9gKyJUxEjH43Lru48IRu3ku2NpHfYrNuJpHEO26u1LTAKDYPvqiUyeBFxwyWtmSO+L/NpD", + "mOE3/Yey33ea3yKkc0UOSn2azyNMkEbbuc48JpDBGGkq/1FLhVrgZR6UfC4Zppdl23sWDD1bXnS1oMBm", + "l2tBvtQYZ9ehGF8YRanGa+UikU6EzPRYRwkrjus/j4Q5rgfYMAmzLkBZS8IMYYbfjHFYS8KMUesgYTZ4", + "fgmzYPixJax8nU0jIcN4KwPOKVkfkTiiwf9cfjr3iFIZLDlXfrSgzm4hDYBaroAqpEEFIuMTNIDz98nZ", + "aSdw5MAWcJZCF0t84Gj3sl31FJdstDGzXNk4reqsUt6+qlj6OkVsZfE0FstpPsLBw+5is4N/H1XxOa4U", + "cTCpfZwmyprfKiSoDilIkUVdKuLgPtTr25Ausz5W4wV/oOHq0fabXXdS36BZDczkcnc1lI+fAYSXpoP0", + "5Q+AoFubti6y1oVs+M1KtLSbEfsup1ahi+hMHaNOCb5Oyye9/BalnPfpZFG85wbu+rXMG9Xd6zTReXwY", + "cdODmvXYqjDO1Cpc2kHN8EC9sPtoPOO8W2sDWFYzGYAPZdhhAlOuM5xK+TRorc9y5EV2Ov2FM+6XLqb2", + "pRFV0cJqVJ+nRPd5Zy1cDyU2QzyNu1H7Qg19JfcTkltT4ynpbd152sER1Ceju7iDT0Bc/wmrJ/ULK6fB", + "NyQEzg6R6WKGzwftyh7Db/qPwoXpwCyqBvjyeKXfUPDxLF/svePyznrQk3JpuU16s5hU18Puz6MCMtHJ", + "YhWn9TbFYD1B2Fc7sXhXrnRIYO820ViapvanNJb5oaIutjI/v/tyGK2x2eZZkiuVizg3RFHZN4bb164/", + "BkvRpKPuMic+f2TVVTn0+lfRXCHmT626BIOEz811+X4um5hhG5N+eiJWq3cm/FV4LWOE3PmiAOqrDXV9", + "pYW7BORXQ93m024Ii7MCz1C1rN1M7cCISkhG5lL+l2NdrGMaFS9Y3/jfpVZQ4PqJ6gXm9wm+X7XAB8AL", + "rRVYVK0T1CNRQ31+oEVJn6hBz0Lw8gGl9am//STQbI7zaM6D3IsXvqmzCOsUjUossZbltk/JOkx2DklH", + "g+07RbGZDQimkqIOcPmI2O9qCTeHMqMfTnNnxriByknqobK+kfyVzptA51TRqpnUFa3czdl9dXPb3Nx7", + "+LcqYznRN3A8hZ9T/+WxVy+3s5cL9a/OGR+nXYzu6dW8VG3a73JZpKPFpfaTcva6D7pdcqNdLM1NpnNo", + "PWbSbSpdGlReMj99ecpeP7s8eLe53S/34A3dR9Gpn+WVOzaVO0yzzD3Y44GtMXlTzIeV5J4DEt4vff4S", + "jNZrs8738owbO3YezMVrdvDkvTuvLP3aU7SxsuRsLHpkUZLfzSK0ZkRj/1rhq0y9MJnq+y9s8KE844DO", + "OPf8Cumm58fLksctFl83X/4qIa8S8h261Bp+bnljDWCjGHoLGif5z+u+iuLai/8ogvj4yYjWH3X+qzRi", + 
"Fb9AvYa8Nnut3dqTrd/c+JGy6mtlwJ7BymxoJ7Ti1ox7qtyprsBiNxk3lW84WtF0K6QxxETdb9STSDYT", + "eH/2rvlKpZAGD7xHaXid4uBqoI+Q6JLWwCx+V2GrnkvZml9zeRYgDXj524Fa/q4kfg4gswsy8nHZg7sv", + "d/8OAAD//7mGQmsCjQAA", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/dm/openapi/gen.types.go b/dm/openapi/gen.types.go index 4d5cea78df4..b9e0e80806b 100644 --- a/dm/openapi/gen.types.go +++ b/dm/openapi/gen.types.go @@ -395,6 +395,21 @@ type TaskBinLogFilterRule struct { IgnoreSql *[]string `json:"ignore_sql,omitempty"` } +// TaskConfigRequest defines model for TaskConfigRequest. +type TaskConfigRequest struct { + // whether to overwrite task config template + Overwrite bool `json:"overwrite"` +} + +// TaskConfigResponse defines model for TaskConfigResponse. +type TaskConfigResponse struct { + FailedTaskList []struct { + ErrorMsg string `json:"error_msg"` + TaskName string `json:"task_name"` + } `json:"failed_task_list"` + SuccessTaskList []string `json:"success_task_list"` +} + // configuration of full migrate tasks type TaskFullMigrateConf struct { // to control the way in which data is exported for consistency assurance @@ -522,6 +537,12 @@ type DMAPIStopRelayJSONBody StopRelayRequest // DMAPITransferSourceJSONBody defines parameters for DMAPITransferSource. type DMAPITransferSourceJSONBody WorkerNameRequest +// DMAPICreateTaskConfigJSONBody defines parameters for DMAPICreateTaskConfig. +type DMAPICreateTaskConfigJSONBody Task + +// DMAPIImportTaskConfigJSONBody defines parameters for DMAPIImportTaskConfig. +type DMAPIImportTaskConfigJSONBody TaskConfigRequest + // DMAPIStartTaskJSONBody defines parameters for DMAPIStartTask. type DMAPIStartTaskJSONBody CreateTaskRequest @@ -558,6 +579,12 @@ type DMAPIStopRelayJSONRequestBody DMAPIStopRelayJSONBody // DMAPITransferSourceJSONRequestBody defines body for DMAPITransferSource for application/json ContentType. type DMAPITransferSourceJSONRequestBody DMAPITransferSourceJSONBody +// DMAPICreateTaskConfigJSONRequestBody defines body for DMAPICreateTaskConfig for application/json ContentType. +type DMAPICreateTaskConfigJSONRequestBody DMAPICreateTaskConfigJSONBody + +// DMAPIImportTaskConfigJSONRequestBody defines body for DMAPIImportTaskConfig for application/json ContentType. +type DMAPIImportTaskConfigJSONRequestBody DMAPIImportTaskConfigJSONBody + // DMAPIStartTaskJSONRequestBody defines body for DMAPIStartTask for application/json ContentType. 
type DMAPIStartTaskJSONRequestBody DMAPIStartTaskJSONBody diff --git a/dm/openapi/spec/dm.yaml b/dm/openapi/spec/dm.yaml index 1c9839e28ae..160642c8aeb 100644 --- a/dm/openapi/spec/dm.yaml +++ b/dm/openapi/spec/dm.yaml @@ -723,6 +723,149 @@ paths: "application/json": schema: $ref: "#/components/schemas/ErrorWithMessage" + /api/v1/task/configs: + post: + tags: + - task + summary: "create task config" + operationId: "DMAPICreateTaskConfig" + requestBody: + description: "request body" + content: + "application/json": + schema: + $ref: "#/components/schemas/Task" + responses: + "201": + description: "success" + content: + "application/json": + schema: + $ref: "#/components/schemas/Task" + "400": + description: "failed" + content: + "application/json": + schema: + $ref: "#/components/schemas/ErrorWithMessage" + get: + tags: + - task + summary: "get task config list" + operationId: "DMAPIGetTaskConfigList" + responses: + "200": + description: "task list" + content: + "application/json": + schema: + $ref: "#/components/schemas/GetTaskListResponse" + "400": + description: "failed" + content: + "application/json": + schema: + $ref: "#/components/schemas/ErrorWithMessage" + /api/v1/task/configs/import: + post: + tags: + - task + summary: "import task config" + operationId: "DMAPIImportTaskConfig" + requestBody: + description: "request body" + content: + "application/json": + schema: + $ref: "#/components/schemas/TaskConfigRequest" + responses: + "202": + description: "success" + content: + "application/json": + schema: + $ref: "#/components/schemas/TaskConfigResponse" + "400": + description: "failed" + content: + "application/json": + schema: + $ref: "#/components/schemas/ErrorWithMessage" + /api/v1/task/configs/{task-name}: + get: + tags: + - task + summary: "get task_config" + operationId: "DMAPIGetTaskConfig" + parameters: + - name: task-name + in: path + description: "globally unique task name" + required: true + schema: + type: string + example: "task-1" + responses: + "200": + description: "success" + content: + "application/json": + schema: + $ref: "#/components/schemas/Task" + "400": + description: "failed" + content: + "application/json": + schema: + $ref: "#/components/schemas/ErrorWithMessage" + put: + tags: + - task + summary: "update task_config" + operationId: "DMAPUpdateTaskConfig" + parameters: + - name: task-name + in: path + description: "globally unique task name" + required: true + schema: + type: string + example: "task-1" + responses: + "200": + description: "success" + content: + "application/json": + schema: + $ref: "#/components/schemas/Task" + "400": + description: "failed" + content: + "application/json": + schema: + $ref: "#/components/schemas/ErrorWithMessage" + delete: + tags: + - task + summary: "delete task_config" + operationId: "DMAPIDeleteTaskConfig" + parameters: + - name: task-name + in: path + description: "globally unique task name" + required: true + schema: + type: string + example: "task-1" + responses: + "204": + description: "success" + "400": + description: "failed" + content: + "application/json": + schema: + $ref: "#/components/schemas/ErrorWithMessage" /api/v1/cluster/masters: get: @@ -1504,6 +1647,7 @@ components: required: - "remove_meta" - "task" + OperateTaskTableStructureRequest: description: action to operate table request type: object @@ -1617,3 +1761,35 @@ components: required: - "total" - "data" + + TaskConfigRequest: + type: object + properties: + overwrite: + type: boolean + default: false + description: whether to overwrite task config 
template
+      required:
+        - "overwrite"
+    TaskConfigResponse:
+      type: object
+      properties:
+        success_task_list:
+          type: array
+          items:
+            type: string
+        failed_task_list:
+          type: array
+          items:
+            type: object
+            properties:
+              task_name:
+                type: string
+              error_msg:
+                type: string
+            required:
+              - "task_name"
+              - "error_msg"
+      required:
+        - "success_task_list"
+        - "failed_task_list"
diff --git a/dm/pkg/conn/basedb.go b/dm/pkg/conn/basedb.go
index 250adb6aaeb..a9dbbf80401 100644
--- a/dm/pkg/conn/basedb.go
+++ b/dm/pkg/conn/basedb.go
@@ -24,15 +24,16 @@ import (
 	"sync/atomic"
 
 	"github.com/DATA-DOG/go-sqlmock"
+	"github.com/go-sql-driver/mysql"
 	"github.com/pingcap/failpoint"
+	toolutils "github.com/pingcap/tidb-tools/pkg/utils"
+	"go.uber.org/zap"
 
 	"github.com/pingcap/tiflow/dm/dm/config"
+	"github.com/pingcap/tiflow/dm/pkg/log"
 	"github.com/pingcap/tiflow/dm/pkg/retry"
 	"github.com/pingcap/tiflow/dm/pkg/terror"
 	"github.com/pingcap/tiflow/dm/pkg/utils"
-
-	"github.com/go-sql-driver/mysql"
-	toolutils "github.com/pingcap/tidb-tools/pkg/utils"
 )
 
 var customID int64
@@ -182,6 +183,13 @@ func (d *BaseDB) CloseBaseConn(conn *BaseConn) error {
 	return conn.close()
 }
 
+// CloseBaseConnWithoutErr closes the base connection and logs a warning if the close fails.
+func CloseBaseConnWithoutErr(d *BaseDB, conn *BaseConn) {
+	if err1 := d.CloseBaseConn(conn); err1 != nil {
+		log.L().Warn("close db connection failed", zap.Error(err1))
+	}
+}
+
 // Close release *BaseDB resource.
 func (d *BaseDB) Close() error {
 	if d == nil || d.DB == nil {
diff --git a/dm/pkg/cputil/table.go b/dm/pkg/cputil/table.go
index d8eb4385e86..c7b6ec0dbd9 100644
--- a/dm/pkg/cputil/table.go
+++ b/dm/pkg/cputil/table.go
@@ -18,6 +18,11 @@ func LoaderCheckpoint(task string) string {
 	return task + "_loader_checkpoint"
 }
 
+// LightningCheckpoint returns lightning's checkpoint table name.
+func LightningCheckpoint(task string) string {
+	return task + "_lightning_checkpoint_list"
+}
+
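// A quick sketch (not from this patch) of the table names these checkpoint helpers
// produce; "t1" stands in for a real task name:
//
//	cputil.LoaderCheckpoint("t1")    // "t1_loader_checkpoint"
//	cputil.LightningCheckpoint("t1") // "t1_lightning_checkpoint_list"
//	cputil.SyncerCheckpoint("t1")    // "t1_syncer_checkpoint"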
 // SyncerCheckpoint returns syncer's checkpoint table name.
 func SyncerCheckpoint(task string) string {
 	return task + "_syncer_checkpoint"
diff --git a/dm/tests/dmctl_basic/conf/get_worker1.toml b/dm/tests/dmctl_basic/conf/get_worker1.toml
index 5155bd6f8ae..8d7ae26c1ae 100644
--- a/dm/tests/dmctl_basic/conf/get_worker1.toml
+++ b/dm/tests/dmctl_basic/conf/get_worker1.toml
@@ -9,6 +9,7 @@ advertise-addr = "127.0.0.1:8262"
 config-file = "/tmp/dm_test/dmctl_basic/worker1/dm-worker.toml"
 keepalive-ttl = 60
 relay-keepalive-ttl = 1800
+relay-dir = ""
 ssl-ca = ""
 ssl-cert = ""
 ssl-key = ""
diff --git a/dm/tests/dmctl_basic/conf/get_worker2.toml b/dm/tests/dmctl_basic/conf/get_worker2.toml
index 5948a62439a..88b398e6e0c 100644
--- a/dm/tests/dmctl_basic/conf/get_worker2.toml
+++ b/dm/tests/dmctl_basic/conf/get_worker2.toml
@@ -9,6 +9,7 @@ advertise-addr = "127.0.0.1:8263"
 config-file = "/tmp/dm_test/dmctl_basic/worker2/dm-worker.toml"
 keepalive-ttl = 60
 relay-keepalive-ttl = 1800
+relay-dir = ""
 ssl-ca = ""
 ssl-cert = ""
 ssl-key = ""
diff --git a/dm/tests/print_status/conf/dm-worker1.toml b/dm/tests/print_status/conf/dm-worker1.toml
index 7a72ea72bf8..c9da9f1b006 100644
--- a/dm/tests/print_status/conf/dm-worker1.toml
+++ b/dm/tests/print_status/conf/dm-worker1.toml
@@ -1,2 +1,3 @@
 name = "worker1"
 join = "127.0.0.1:8261"
+relay-dir = "placeholder"
diff --git a/dm/tests/print_status/run.sh b/dm/tests/print_status/run.sh
index 539602c4300..67469c5ecf4 100755
--- a/dm/tests/print_status/run.sh
+++ b/dm/tests/print_status/run.sh
@@ -19,9 +19,12 @@ function run() {
 		"github.com/pingcap/tiflow/dm/syncer/ProcessBinlogSlowDown=sleep(4)")
 	export GO_FAILPOINTS="$(join_string \; ${inject_points[@]})"
 
+	cp $cur/conf/dm-worker1.toml $WORK_DIR/dm-worker1.toml
+	sed -i "s%placeholder%$WORK_DIR/relay_by_worker%g" $WORK_DIR/dm-worker1.toml
+
 	run_dm_master $WORK_DIR/master $MASTER_PORT $cur/conf/dm-master.toml
 	check_rpc_alive $cur/../bin/check_master_online 127.0.0.1:$MASTER_PORT
-	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $cur/conf/dm-worker1.toml
+	run_dm_worker $WORK_DIR/worker1 $WORKER1_PORT $WORK_DIR/dm-worker1.toml
 	check_rpc_alive $cur/../bin/check_worker_online 127.0.0.1:$WORKER1_PORT
 	# operate mysql config to worker
 	cp $cur/conf/source1.yaml $WORK_DIR/source1.yaml
@@ -34,6 +37,8 @@ function run() {
 	# use sync_diff_inspector to check full dump loader
 	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
 
+	ls $WORK_DIR/relay_by_worker/worker1/*
+
 	run_sql_file $cur/data/db.increment.sql $MYSQL_HOST1 $MYSQL_PORT1 $MYSQL_PASSWORD1
 	check_sync_diff $WORK_DIR $cur/conf/diff_config.toml
 	check_log_contains $WORK_DIR/worker1/log/dm-worker.log 'enable safe-mode because of task initialization.*"duration in seconds"=60'
diff --git a/docs/design/2021-10-13-ticdc-mq-sink-column-selector.md b/docs/design/2021-10-13-ticdc-mq-sink-column-selector.md
index 11b75217d11..e440c58ba13 100644
--- a/docs/design/2021-10-13-ticdc-mq-sink-column-selector.md
+++ b/docs/design/2021-10-13-ticdc-mq-sink-column-selector.md
@@ -45,7 +45,7 @@ dispatchers = [
     {matcher = ['test3.*', 'test4.*'], dispatcher = "rowid"},
 ]
 
-protocol = "default"
+protocol = "open-protocol"
 
 column-selectors = [
     {matcher = ['test1.*', 'test2.*'], columns = ["Column selector expression"]},
diff --git a/pkg/cmd/cli/cli_changefeed_create.go b/pkg/cmd/cli/cli_changefeed_create.go
index 2028b8dcf6f..9f8667026f0 100644
--- a/pkg/cmd/cli/cli_changefeed_create.go
+++ b/pkg/cmd/cli/cli_changefeed_create.go
@@ -43,12 +43,6 @@ import (
 	"go.uber.org/zap"
 )
 
-// forceEnableOldValueProtocols specifies which protocols need to be
forced to enable old value. -var forceEnableOldValueProtocols = []string{ - "canal", - "maxwell", -} - // changefeedCommonOptions defines common changefeed flags. type changefeedCommonOptions struct { noConfirm bool @@ -208,7 +202,7 @@ func (o *createChangefeedOptions) completeCfg(ctx context.Context, cmd *cobra.Co if protocol != "" { cfg.Sink.Protocol = protocol } - for _, fp := range forceEnableOldValueProtocols { + for _, fp := range config.ForceEnableOldValueProtocols { if cfg.Sink.Protocol == fp { log.Warn("Attempting to replicate without old value enabled. CDC will enable old value and continue.", zap.String("protocol", cfg.Sink.Protocol)) cfg.EnableOldValue = true diff --git a/pkg/cmd/server/server_test.go b/pkg/cmd/server/server_test.go index 621ed84a2b6..60de27a701a 100644 --- a/pkg/cmd/server/server_test.go +++ b/pkg/cmd/server/server_test.go @@ -191,18 +191,19 @@ func (s *serverSuite) TestParseCfg(c *check.C) { EnableTableActor: true, EnableDBSorter: false, DB: &config.DBConfig{ - Count: 16, - Concurrency: 256, - MaxOpenFiles: 10000, - BlockSize: 65536, - BlockCacheSize: 4294967296, - WriterBufferSize: 8388608, - Compression: "snappy", - TargetFileSizeBase: 8388608, - CompactionL0Trigger: 160, - WriteL0SlowdownTrigger: math.MaxInt32, - WriteL0PauseTrigger: math.MaxInt32, - CleanupSpeedLimit: 10000, + Count: 16, + Concurrency: 256, + MaxOpenFiles: 10000, + BlockSize: 65536, + BlockCacheSize: 4294967296, + WriterBufferSize: 8388608, + Compression: "snappy", + TargetFileSizeBase: 8388608, + WriteL0SlowdownTrigger: math.MaxInt32, + WriteL0PauseTrigger: math.MaxInt32, + CompactionL0Trigger: 160, + CompactionDeletionThreshold: 160000, + CleanupSpeedLimit: 10000, }, // We expect the default configuration here. Messages: &config.MessagesConfig{ @@ -263,6 +264,7 @@ writer-buffer-size = 9 compression = "none" target-file-size-base = 10 compaction-l0-trigger = 11 +compaction-deletion-threshold = 15 write-l0-slowdown-trigger = 12 write-l0-pause-trigger = 13 cleanup-speed-limit = 14 @@ -326,18 +328,19 @@ server-worker-pool-size = 16 EnableTableActor: true, EnableDBSorter: false, DB: &config.DBConfig{ - Count: 5, - Concurrency: 6, - MaxOpenFiles: 7, - BlockSize: 32768, - BlockCacheSize: 8, - WriterBufferSize: 9, - Compression: "none", - TargetFileSizeBase: 10, - CompactionL0Trigger: 11, - WriteL0SlowdownTrigger: 12, - WriteL0PauseTrigger: 13, - CleanupSpeedLimit: 14, + Count: 5, + Concurrency: 6, + MaxOpenFiles: 7, + BlockSize: 32768, + BlockCacheSize: 8, + WriterBufferSize: 9, + Compression: "none", + TargetFileSizeBase: 10, + CompactionL0Trigger: 11, + WriteL0SlowdownTrigger: 12, + WriteL0PauseTrigger: 13, + CleanupSpeedLimit: 14, + CompactionDeletionThreshold: 15, }, Messages: &config.MessagesConfig{ ClientMaxBatchInterval: config.TomlDuration(500 * time.Millisecond), @@ -460,18 +463,19 @@ cert-allowed-cn = ["dd","ee"] EnableTableActor: true, EnableDBSorter: false, DB: &config.DBConfig{ - Count: 16, - Concurrency: 256, - MaxOpenFiles: 10000, - BlockSize: 65536, - BlockCacheSize: 4294967296, - WriterBufferSize: 8388608, - Compression: "snappy", - TargetFileSizeBase: 8388608, - CompactionL0Trigger: 160, - WriteL0SlowdownTrigger: math.MaxInt32, - WriteL0PauseTrigger: math.MaxInt32, - CleanupSpeedLimit: 10000, + Count: 16, + Concurrency: 256, + MaxOpenFiles: 10000, + BlockSize: 65536, + BlockCacheSize: 4294967296, + WriterBufferSize: 8388608, + Compression: "snappy", + TargetFileSizeBase: 8388608, + WriteL0SlowdownTrigger: math.MaxInt32, + WriteL0PauseTrigger: math.MaxInt32, + 
CompactionL0Trigger:         160,
+				CompactionDeletionThreshold: 160000,
+				CleanupSpeedLimit:           10000,
 			},
 			// We expect the default configuration here.
 			Messages: &config.MessagesConfig{
diff --git a/pkg/cmd/util/changefeed.toml b/pkg/cmd/util/changefeed.toml
index 53b169d5e71..db5f686d78c 100644
--- a/pkg/cmd/util/changefeed.toml
+++ b/pkg/cmd/util/changefeed.toml
@@ -37,10 +37,10 @@ column-selectors = [
     { matcher = ['test3.*', 'test4.*'], columns = ["!a", "column3"] },
 ]
 # 对于 MQ 类的 Sink,可以指定消息的协议格式
-# 协议目前支持 default, canal, avro 和 maxwell 四种,default 为 ticdc-open-protocol
+# 协议目前支持 open-protocol, canal, canal-json, avro 和 maxwell 五种。
 # For MQ Sinks, you can configure the protocol of the messages sending to MQ
-# Currently the protocol support default, canal, avro and maxwell. Default is ticdc-open-protocol
-protocol = "default"
+# Currently the supported protocols are open-protocol, canal, canal-json, avro and maxwell.
+protocol = "open-protocol"
 
 [cyclic-replication]
 # 是否开启环形复制
diff --git a/pkg/cmd/util/helper_test.go b/pkg/cmd/util/helper_test.go
index a6e466508f9..93b74d2bfda 100644
--- a/pkg/cmd/util/helper_test.go
+++ b/pkg/cmd/util/helper_test.go
@@ -193,7 +193,7 @@ func (s *utilsSuite) TestAndWriteExampleReplicaTOML(c *check.C) {
 		{Matcher: []string{"test1.*", "test2.*"}, Columns: []string{"column1", "column2"}},
 		{Matcher: []string{"test3.*", "test4.*"}, Columns: []string{"!a", "column3"}},
 	},
-		Protocol: "default",
+		Protocol: "open-protocol",
 	})
 	c.Assert(cfg.Cyclic, check.DeepEquals, &config.CyclicConfig{
 		Enable: false,
diff --git a/pkg/config/config_test_data.go b/pkg/config/config_test_data.go
index ad36f0ec768..0c10af86ac3 100644
--- a/pkg/config/config_test_data.go
+++ b/pkg/config/config_test_data.go
@@ -118,9 +118,10 @@ const (
 				"writer-buffer-size": 8388608,
 				"compression": "snappy",
 				"target-file-size-base": 8388608,
-				"compaction-l0-trigger": 160,
 				"write-l0-slowdown-trigger": 2147483647,
 				"write-l0-pause-trigger": 2147483647,
+				"compaction-l0-trigger": 160,
+				"compaction-deletion-threshold": 160000,
 				"cleanup-speed-limit": 10000
 			},
 			"messages": {
diff --git a/pkg/config/db.go b/pkg/config/db.go
index 0b49d34ebde..b2464fc3521 100644
--- a/pkg/config/db.go
+++ b/pkg/config/db.go
@@ -50,11 +50,6 @@ type DBConfig struct {
 	//
 	// The default value is 8388608, 8MB.
 	TargetFileSizeBase int `toml:"target-file-size-base" json:"target-file-size-base"`
-	// CompactionL0Trigger defines number of leveldb sst file at level-0 that will
-	// trigger compaction.
-	//
-	// The default value is 160.
-	CompactionL0Trigger int `toml:"compaction-l0-trigger" json:"compaction-l0-trigger"`
 	// WriteL0SlowdownTrigger defines number of leveldb sst file at level-0 that
 	// will trigger write slowdown.
 	//
@@ -65,6 +60,18 @@ type DBConfig struct {
 	//
 	// The default value is 1<<31 - 1.
 	WriteL0PauseTrigger int `toml:"write-l0-pause-trigger" json:"write-l0-pause-trigger"`
+	// CompactionL0Trigger defines the number of leveldb sst files at level-0 that
+	// will trigger a compaction.
+	//
+	// The default value is 160.
+	CompactionL0Trigger int `toml:"compaction-l0-trigger" json:"compaction-l0-trigger"`
+	// CompactionDeletionThreshold defines the number of deletion entries that
+	// will trigger a compaction.
+	//
+	// The default value is 160000. With that many accumulated deletions,
+	// Iterator.First() takes about 27ms to 149ms; see pkg/db.BenchmarkNext.
+	CompactionDeletionThreshold int `toml:"compaction-deletion-threshold" json:"compaction-deletion-threshold"`
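// For illustration only: overriding the new knob programmatically. The Debug.DB
// field path mirrors the config.GetDefaultServerConfig().Clone().Debug.DB access
// used by pkg/db's benchmark; the override value here is hypothetical.
//
//	cfg := config.GetDefaultServerConfig().Clone()
//	cfg.Debug.DB.CompactionDeletionThreshold = 320000 // default: 160000
//	cfg.Debug.DB.CompactionL0Trigger = 160            // default: 160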
 	// CleanupSpeedLimit limits clean up speed, based on key value entry count.
 	//
 	// The default value is 10000.
diff --git a/pkg/config/replica_config.go b/pkg/config/replica_config.go
index 09ec851e7d1..8f29bd88f94 100644
--- a/pkg/config/replica_config.go
+++ b/pkg/config/replica_config.go
@@ -35,9 +35,7 @@ var defaultReplicaConfig = &ReplicaConfig{
 	Mounter: &MounterConfig{
 		WorkerNum: 16,
 	},
-	Sink: &SinkConfig{
-		Protocol: ProtocolOpen.String(),
-	},
+	Sink: &SinkConfig{},
 	Cyclic: &CyclicConfig{
 		Enable: false,
 	},
diff --git a/pkg/config/replica_config_test.go b/pkg/config/replica_config_test.go
index d99d0efc22d..1fd6c93fc67 100644
--- a/pkg/config/replica_config_test.go
+++ b/pkg/config/replica_config_test.go
@@ -35,6 +35,7 @@ func TestReplicaConfigMarshal(t *testing.T) {
 	conf.ForceReplicate = true
 	conf.Filter.Rules = []string{"1.1"}
 	conf.Mounter.WorkerNum = 3
+	conf.Sink.Protocol = "open-protocol"
 	conf.Sink.ColumnSelectors = []*ColumnSelector{
 		{
 			Matcher: []string{"1.1"},
@@ -74,6 +75,7 @@ func TestReplicaConfigOutDated(t *testing.T) {
 	conf.ForceReplicate = true
 	conf.Filter.Rules = []string{"1.1"}
 	conf.Mounter.WorkerNum = 3
+	conf.Sink.Protocol = "open-protocol"
 	conf.Sink.DispatchRules = []*DispatchRule{
 		{Matcher: []string{"a.b"}, Dispatcher: "r1"},
 		{Matcher: []string{"a.c"}, Dispatcher: "r2"},
diff --git a/pkg/config/server_config.go b/pkg/config/server_config.go
index c95eda53408..aad3ef30ced 100644
--- a/pkg/config/server_config.go
+++ b/pkg/config/server_config.go
@@ -107,17 +107,18 @@ var defaultServerConfig = &ServerConfig{
 			Count: 16,
 			// Following configs are optimized for write throughput.
 			// Users should not change them.
-			Concurrency:            256,
-			MaxOpenFiles:           10000,
-			BlockSize:              65536,
-			BlockCacheSize:         4294967296,
-			WriterBufferSize:       8388608,
-			Compression:            "snappy",
-			TargetFileSizeBase:     8388608,
-			CompactionL0Trigger:    160,
-			WriteL0SlowdownTrigger: math.MaxInt32,
-			WriteL0PauseTrigger:    math.MaxInt32,
-			CleanupSpeedLimit:      10000,
+			Concurrency:                 256,
+			MaxOpenFiles:                10000,
+			BlockSize:                   65536,
+			BlockCacheSize:              4294967296,
+			WriterBufferSize:            8388608,
+			Compression:                 "snappy",
+			TargetFileSizeBase:          8388608,
+			WriteL0SlowdownTrigger:      math.MaxInt32,
+			WriteL0PauseTrigger:         math.MaxInt32,
+			CompactionL0Trigger:         160,
+			CompactionDeletionThreshold: 160000,
+			CleanupSpeedLimit:           10000,
 		},
 		Messages: defaultMessageConfig.Clone(),
 	},
diff --git a/pkg/config/sink.go b/pkg/config/sink.go
index 272d252d736..1bf0338237c 100644
--- a/pkg/config/sink.go
+++ b/pkg/config/sink.go
@@ -21,6 +21,13 @@ import (
 	cerror "github.com/pingcap/tiflow/pkg/errors"
 )
 
+// ForceEnableOldValueProtocols lists the protocols that require old value to be enabled.
+var ForceEnableOldValueProtocols = []string{
+	ProtocolCanal.String(),
+	ProtocolCanalJSON.String(),
+	ProtocolMaxwell.String(),
+}
+
 // SinkConfig represents sink config for a changefeed
 type SinkConfig struct {
 	DispatchRules []*DispatchRule `toml:"dispatchers" json:"dispatchers"`
@@ -40,14 +47,14 @@ type ColumnSelector struct {
 }
 
 func (s *SinkConfig) validate(enableOldValue bool) error {
-	protocol := s.Protocol
 	if !enableOldValue {
-		switch protocol {
-		case ProtocolCanal.String(), ProtocolCanalJSON.String(), ProtocolMaxwell.String():
-			log.Error(fmt.Sprintf("Old value is not enabled when using `%s` protocol.
"+ - "Please update changefeed config", protocol)) - return cerror.WrapError(cerror.ErrKafkaInvalidConfig, - errors.New(fmt.Sprintf("%s protocol requires old value to be enabled", protocol))) + for _, protocolStr := range ForceEnableOldValueProtocols { + if protocolStr == s.Protocol { + log.Error(fmt.Sprintf("Old value is not enabled when using `%s` protocol. "+ + "Please update changefeed config", s.Protocol)) + return cerror.WrapError(cerror.ErrKafkaInvalidConfig, + errors.New(fmt.Sprintf("%s protocol requires old value to be enabled", s.Protocol))) + } } } diff --git a/pkg/db/db.go b/pkg/db/db.go index 040a4212e2a..c4f54904f7e 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -17,6 +17,7 @@ package db type DB interface { Snapshot() (Snapshot, error) Batch(cap int) Batch + Compact(start, end []byte) error Close() error CollectMetrics(captureAddr string, id int) } diff --git a/pkg/db/db_test.go b/pkg/db/db_test.go index 17d930706d5..c6a84de83c0 100644 --- a/pkg/db/db_test.go +++ b/pkg/db/db_test.go @@ -14,7 +14,11 @@ package db import ( + "bytes" "context" + "fmt" + "math" + "math/rand" "path/filepath" "testing" "time" @@ -114,6 +118,9 @@ func testDB(t *testing.T, db DB) { lsnap.Release() require.Nil(t, snap.Release()) + // Compact + require.Nil(t, db.Compact([]byte{0x00}, []byte{0xff})) + // Close require.Nil(t, db.Close()) require.Nil(t, ldb.Close()) @@ -162,3 +169,94 @@ func TestPebbleMetrics(t *testing.T) { require.Nil(t, pdb.Close()) } + +// goos: linux +// goarch: amd64 +// pkg: github.com/pingcap/tiflow/pkg/db +// cpu: Intel(R) Xeon(R) CPU E5-2630 v4 @ 2.20GHz +// BenchmarkNext/leveldb/next_1_event(s)-40 4492518 272.8 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/leveldb/next_4_event(s)-40 1218038 1023 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/leveldb/next_16_event(s)-40 269282 4062 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/leveldb/next_64_event(s)-40 72012 16933 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/leveldb/next_256_event(s)-40 19056 65554 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/leveldb/next_1024_event(s)-40 4311 303426 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/leveldb/next_4096_event(s)-40 853 1248045 ns/op 1 B/op 0 allocs/op +// BenchmarkNext/leveldb/next_16384_event(s)-40 230 4902989 ns/op 43826 B/op 389 allocs/op +// BenchmarkNext/leveldb/next_65536_event(s)-40 25 240067525 ns/op 1687187 B/op 1708 allocs/op +// BenchmarkNext/leveldb/next_262144_event(s)-40 4 285807572 ns/op 840336 B/op 6532 allocs/op +// BenchmarkNext/pebble/next_1_event(s)-40 4241365 284.0 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/pebble/next_4_event(s)-40 1844215 683.1 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/pebble/next_16_event(s)-40 533388 2438 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/pebble/next_64_event(s)-40 118070 8653 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/pebble/next_256_event(s)-40 34298 37768 ns/op 0 B/op 0 allocs/op +// BenchmarkNext/pebble/next_1024_event(s)-40 3860 259939 ns/op 5 B/op 0 allocs/op +// BenchmarkNext/pebble/next_4096_event(s)-40 946 1194918 ns/op 20 B/op 0 allocs/op +// BenchmarkNext/pebble/next_16384_event(s)-40 331 3577048 ns/op 77 B/op 0 allocs/op +// BenchmarkNext/pebble/next_65536_event(s)-40 40 27640122 ns/op 651 B/op 0 allocs/op +// BenchmarkNext/pebble/next_262144_event(s)-40 7 149654135 ns/op 5512 B/op 3 allocs/op +func BenchmarkNext(b *testing.B) { + ctx := context.Background() + cfg := config.GetDefaultServerConfig().Clone().Debug.DB + cfg.Count = 1 + + cases := []struct { + name string + dbfn func(name string) DB + }{{ + name: "leveldb", + dbfn: 
func(name string) DB {
+			db, err := OpenLevelDB(ctx, 1, filepath.Join(b.TempDir(), name), cfg)
+			require.Nil(b, err)
+			return db
+		},
+	}, {
+		name: "pebble",
+		dbfn: func(name string) DB {
+			db, err := OpenPebble(ctx, 1, filepath.Join(b.TempDir(), name), cfg)
+			require.Nil(b, err)
+			return db
+		},
+	}}
+
+	rd := rand.New(rand.NewSource(0))
+	for _, cs := range cases {
+		b.Run(cs.name, func(b *testing.B) {
+			for exp := 0; exp < 10; exp++ {
+				count := int(math.Pow(4, float64(exp)))
+				db := cs.dbfn(fmt.Sprintf("%s-%d", cs.name, count))
+				batch := db.Batch(256)
+				// Key length for typical workload, see sorter/encoding/key.go
+				// 4 + 8 + 8 + 8 + 2 + 44 (key length, obtained by running sst_dump on a tikv sst file)
+				key := [74]byte{}
+				// Value length for typical workload, see sorter/encoding/value.go
+				// 128 + 314 (value length, obtained by running sst_dump on a tikv sst file)
+				value := [442]byte{}
+				for i := 0; i < count; i++ {
+					n, err := rd.Read(key[:])
+					require.EqualValues(b, len(key), n)
+					require.Nil(b, err)
+					n, err = rd.Read(value[:])
+					require.EqualValues(b, len(value), n)
+					require.Nil(b, err)
+					batch.Put(key[:], value[:])
+					if batch.Count() == 256 {
+						require.Nil(b, batch.Commit())
+						batch.Reset()
+					}
+				}
+				require.Nil(b, batch.Commit())
+				b.ResetTimer()
+
+				b.Run(fmt.Sprintf("next %d event(s)", count), func(b *testing.B) {
+					snap, err := db.Snapshot()
+					require.Nil(b, err)
+					iter := snap.Iterator([]byte{}, bytes.Repeat([]byte{0xff}, len(key)))
+					for i := 0; i < b.N; i++ {
+						for ok := iter.First(); ok; ok = iter.Next() {
+						}
+					}
+				})
+			}
+		})
+	}
+}
diff --git a/pkg/db/leveldb.go b/pkg/db/leveldb.go
index 66dcee2c638..9618fbfd0b2 100644
--- a/pkg/db/leveldb.go
+++ b/pkg/db/leveldb.go
@@ -98,6 +98,10 @@ func (p *levelDB) Batch(cap int) Batch {
 	}
 }
 
+func (p *levelDB) Compact(start, end []byte) error {
+	return p.db.CompactRange(util.Range{Start: start, Limit: end})
+}
+
 func (p *levelDB) Close() error {
 	return p.db.Close()
 }
diff --git a/pkg/db/pebble.go b/pkg/db/pebble.go
index 688731fd57b..54ef50ddbb2 100644
--- a/pkg/db/pebble.go
+++ b/pkg/db/pebble.go
@@ -148,6 +148,10 @@ func (p *pebbleDB) Batch(cap int) Batch {
 	}
 }
 
+func (p *pebbleDB) Compact(start, end []byte) error {
+	return p.db.Compact(start, end)
+}
+
 func (p *pebbleDB) Close() error {
 	return p.db.Close()
 }
diff --git a/pkg/orchestrator/reactor_state_test.go b/pkg/orchestrator/reactor_state_test.go
index f9f83b58f51..8d51eaa6f4d 100644
--- a/pkg/orchestrator/reactor_state_test.go
+++ b/pkg/orchestrator/reactor_state_test.go
@@ -64,7 +64,7 @@ func (s *stateSuite) TestChangefeedStateUpdate(c *check.C) {
 			"/tidb/cdc/capture/6bbc01c8-0605-4f86-a0f9-b3119109b225",
 		},
 		updateValue: []string{
-
`{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":421980685886554116,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":"","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"open-protocol"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1},"consistent":{"level":"normal","storage":"local"}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`, `{"resolved-ts":421980720003809281,"checkpoint-ts":421980719742451713,"admin-job-type":0}`, `{"checkpoint-ts":421980720003809281,"resolved-ts":421980720003809281,"count":0,"error":null}`, `{"tables":{"45":{"start-ts":421980685886554116,"mark-table-id":0}},"operation":null,"admin-job-type":0}`, @@ -86,7 +86,7 @@ func (s *stateSuite) TestChangefeedStateUpdate(c *check.C) { CheckGCSafePoint: true, Filter: &config.FilterConfig{Rules: []string{"*.*"}}, Mounter: &config.MounterConfig{WorkerNum: 16}, - Sink: &config.SinkConfig{Protocol: "default"}, + Sink: &config.SinkConfig{Protocol: "open-protocol"}, Cyclic: &config.CyclicConfig{}, Scheduler: &config.SchedulerConfig{Tp: "table-number", PollingTime: -1}, Consistent: &config.ConsistentConfig{Level: "normal", Storage: "local"}, @@ -121,7 +121,7 @@ func (s *stateSuite) TestChangefeedStateUpdate(c *check.C) { "/tidb/cdc/capture/666777888", }, updateValue: []string{ - `{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":421980685886554116,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":"","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"default"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1},"consistent":{"level":"normal","storage":"local"}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`, + `{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":421980685886554116,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":"","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"open-protocol"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1},"consistent":{"level":"normal","storage":"local"}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`, `{"resolved-ts":421980720003809281,"checkpoint-ts":421980719742451713,"admin-job-type":0}`, `{"checkpoint-ts":421980720003809281,"resolved-ts":421980720003809281,"count":0,"error":null}`, `{"tables":{"45":{"start-ts":421980685886554116,"mark-table-id":0}},"operation":null,"admin-job-type":0}`, @@ 
-147,7 +147,7 @@ func (s *stateSuite) TestChangefeedStateUpdate(c *check.C) { CheckGCSafePoint: true, Filter: &config.FilterConfig{Rules: []string{"*.*"}}, Mounter: &config.MounterConfig{WorkerNum: 16}, - Sink: &config.SinkConfig{Protocol: "default"}, + Sink: &config.SinkConfig{Protocol: "open-protocol"}, Cyclic: &config.CyclicConfig{}, Scheduler: &config.SchedulerConfig{Tp: "table-number", PollingTime: -1}, Consistent: &config.ConsistentConfig{Level: "normal", Storage: "local"}, @@ -188,7 +188,7 @@ func (s *stateSuite) TestChangefeedStateUpdate(c *check.C) { "/tidb/cdc/task/workload/6bbc01c8-0605-4f86-a0f9-b3119109b225/test-fake", }, updateValue: []string{ - `{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":421980685886554116,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":"","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"default"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1},"consistent":{"level":"normal","storage":"local"}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`, + `{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":421980685886554116,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":"","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"open-protocol"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1},"consistent":{"level":"normal","storage":"local"}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`, `{"resolved-ts":421980720003809281,"checkpoint-ts":421980719742451713,"admin-job-type":0}`, `{"checkpoint-ts":421980720003809281,"resolved-ts":421980720003809281,"count":0,"error":null}`, `{"tables":{"45":{"start-ts":421980685886554116,"mark-table-id":0}},"operation":null,"admin-job-type":0}`, @@ -215,7 +215,7 @@ func (s *stateSuite) TestChangefeedStateUpdate(c *check.C) { CheckGCSafePoint: true, Filter: &config.FilterConfig{Rules: []string{"*.*"}}, Mounter: &config.MounterConfig{WorkerNum: 16}, - Sink: &config.SinkConfig{Protocol: "default"}, + Sink: &config.SinkConfig{Protocol: "open-protocol"}, Cyclic: &config.CyclicConfig{}, Scheduler: &config.SchedulerConfig{Tp: "table-number", PollingTime: -1}, Consistent: &config.ConsistentConfig{Level: "normal", Storage: "local"}, @@ -257,7 +257,7 @@ func (s *stateSuite) TestChangefeedStateUpdate(c *check.C) { "/tidb/cdc/task/status/666777888/test1", }, updateValue: []string{ - 
`{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":421980685886554116,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":"","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"default"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1},"consistent":{"level":"normal","storage":"local"}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`, + `{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":421980685886554116,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":"","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"open-protocol"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1},"consistent":{"level":"normal","storage":"local"}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`, `{"resolved-ts":421980720003809281,"checkpoint-ts":421980719742451713,"admin-job-type":0}`, `{"checkpoint-ts":421980720003809281,"resolved-ts":421980720003809281,"count":0,"error":null}`, `{"tables":{"45":{"start-ts":421980685886554116,"mark-table-id":0}},"operation":null,"admin-job-type":0}`, diff --git a/pkg/p2p/client.go b/pkg/p2p/client.go index ddfee8cc25e..4b1d9c66746 100644 --- a/pkg/p2p/client.go +++ b/pkg/p2p/client.go @@ -394,6 +394,11 @@ func (c *MessageClient) TrySendMessage(ctx context.Context, topic Topic, value i failpoint.Return(0, cerrors.ErrPeerMessageSendTryAgain.GenWithStackByArgs()) }) + // FIXME (zixiong): This is a temporary way for testing whether the caller can handler this error. + failpoint.Inject("ClientInjectClosed", func() { + failpoint.Return(0, cerrors.ErrPeerMessageClientClosed.GenWithStackByArgs()) + }) + return c.sendMessage(ctx, topic, value, true) } diff --git a/pkg/p2p/client_test.go b/pkg/p2p/client_test.go index 9254ef0b771..34835fb0d3c 100644 --- a/pkg/p2p/client_test.go +++ b/pkg/p2p/client_test.go @@ -300,7 +300,7 @@ func TestClientSendAnomalies(t *testing.T) { client.connector = connector connector.On("Connect", mock.Anything).Return(grpcClient, func() {}, nil) - grpcStream := newMockSendMessageClient(ctx) + grpcStream := newMockSendMessageClient(runCtx) grpcClient.On("SendMessage", mock.Anything, []grpc.CallOption(nil)).Return( grpcStream, nil, @@ -315,7 +315,6 @@ func TestClientSendAnomalies(t *testing.T) { ClientVersion: "v5.4.0", SenderAdvertisedAddr: "fake-addr:8300", }, packet.Meta) - closeClient() }) grpcStream.On("Recv").Return(nil, nil) @@ -335,11 +334,14 @@ func TestClientSendAnomalies(t *testing.T) { require.Regexp(t, ".*ErrPeerMessageSendTryAgain.*", err.Error()) // Test point 2: close the client while SendMessage is blocking. 
diff --git a/pkg/p2p/client_test.go b/pkg/p2p/client_test.go index 9254ef0b771..34835fb0d3c 100644 --- a/pkg/p2p/client_test.go +++ b/pkg/p2p/client_test.go @@ -300,7 +300,7 @@ func TestClientSendAnomalies(t *testing.T) { client.connector = connector connector.On("Connect", mock.Anything).Return(grpcClient, func() {}, nil) - grpcStream := newMockSendMessageClient(ctx) + grpcStream := newMockSendMessageClient(runCtx) grpcClient.On("SendMessage", mock.Anything, []grpc.CallOption(nil)).Return( grpcStream, nil, @@ -315,7 +315,6 @@ func TestClientSendAnomalies(t *testing.T) { ClientVersion: "v5.4.0", SenderAdvertisedAddr: "fake-addr:8300", }, packet.Meta) - closeClient() }) grpcStream.On("Recv").Return(nil, nil) @@ -335,11 +334,14 @@ func TestClientSendAnomalies(t *testing.T) { require.Regexp(t, ".*ErrPeerMessageSendTryAgain.*", err.Error()) // Test point 2: close the client while SendMessage is blocking. + go func() { + time.Sleep(100 * time.Millisecond) + closeClient() + }() _, err = client.SendMessage(ctx, "test-topic", &testMessage{Value: 1}) require.Error(t, err) require.Regexp(t, ".*ErrPeerMessageClientClosed.*", err.Error()) - closeClient() wg.Wait() // Test point 3: call SendMessage after the client is closed. diff --git a/tests/integration_tests/autorandom/run.sh b/tests/integration_tests/autorandom/run.sh index 3c5d16b06c9..1260bcc5557 100644 --- a/tests/integration_tests/autorandom/run.sh +++ b/tests/integration_tests/autorandom/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-autorandom-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac cdc cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/test.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} # sync_diff can't check non-exist table, so we check expected tables are created in downstream first diff --git a/tests/integration_tests/batch_add_table/run.sh b/tests/integration_tests/batch_add_table/run.sh index c2f543c0c2d..0978027b7be 100644 --- a/tests/integration_tests/batch_add_table/run.sh +++ b/tests/integration_tests/batch_add_table/run.sh @@ -24,12 +24,12 @@ function run() { TOPIC_NAME="ticdc-batch-add-table-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/test.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/capture_session_done_during_task/run.sh b/tests/integration_tests/capture_session_done_during_task/run.sh index 0205d74943d..ac42c3df067 100644 --- a/tests/integration_tests/capture_session_done_during_task/run.sh +++ b/tests/integration_tests/capture_session_done_during_task/run.sh @@ -16,7 +16,7 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" TOPIC_NAME="ticdc-capture-session-done-during-task-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *)
SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac @@ -35,7 +35,7 @@ function run() { sleep 1 if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi capture_key=$(ETCDCTL_API=3 etcdctl get /tidb/cdc/capture --prefix | head -n 1) diff --git a/tests/integration_tests/cdc/run.sh b/tests/integration_tests/cdc/run.sh index 4ca76889705..e9eed66427b 100755 --- a/tests/integration_tests/cdc/run.sh +++ b/tests/integration_tests/cdc/run.sh @@ -23,12 +23,12 @@ function prepare() { TOPIC_NAME="ticdc-cdc-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi } diff --git a/tests/integration_tests/changefeed_auto_stop/run.sh b/tests/integration_tests/changefeed_auto_stop/run.sh index 19ce910a00b..305fe9e4528 100755 --- a/tests/integration_tests/changefeed_auto_stop/run.sh +++ b/tests/integration_tests/changefeed_auto_stop/run.sh @@ -50,12 +50,12 @@ function run() { TOPIC_NAME="ticdc-changefeed-auto-stop-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac changefeedid=$(cdc cli changefeed create --pd="http://${UP_PD_HOST_1}:${UP_PD_PORT_1}" --start-ts=$start_ts --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi ensure 10 check_changefeed_state "http://${UP_PD_HOST_1}:${UP_PD_PORT_1}" ${changefeedid} "normal" "null" diff --git a/tests/integration_tests/changefeed_error/run.sh b/tests/integration_tests/changefeed_error/run.sh index 6c5b878c187..28d62dda939 100755 --- a/tests/integration_tests/changefeed_error/run.sh +++ b/tests/integration_tests/changefeed_error/run.sh @@ -120,13 +120,13 @@ function run() { TOPIC_NAME="ticdc-sink-retry-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) 
SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac changefeedid="changefeed-error" run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" -c $changefeedid if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi ensure $MAX_RETRIES check_changefeed_mark_failed_regex http://${UP_PD_HOST_1}:${UP_PD_PORT_1} ${changefeedid} ".*CDC:ErrStartTsBeforeGC.*" diff --git a/tests/integration_tests/changefeed_finish/run.sh b/tests/integration_tests/changefeed_finish/run.sh index 9857cd32448..58cf6be60ee 100755 --- a/tests/integration_tests/changefeed_finish/run.sh +++ b/tests/integration_tests/changefeed_finish/run.sh @@ -37,7 +37,7 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" TOPIC_NAME="ticdc-changefeed-pause-resume-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac @@ -48,7 +48,7 @@ function run() { changefeed_id=$(cdc cli changefeed create --sink-uri="$SINK_URI" --target-ts=$target_ts 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql "CREATE DATABASE changefeed_finish;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/changefeed_pause_resume/run.sh b/tests/integration_tests/changefeed_pause_resume/run.sh index 9be9837944d..5ecbb82db91 100755 --- a/tests/integration_tests/changefeed_pause_resume/run.sh +++ b/tests/integration_tests/changefeed_pause_resume/run.sh @@ -17,14 +17,14 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" TOPIC_NAME="ticdc-changefeed-pause-resume-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --pd $pd_addr changefeed_id=$(cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql "CREATE DATABASE 
changefeed_pause_resume;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/changefeed_reconstruct/run.sh b/tests/integration_tests/changefeed_reconstruct/run.sh index 54b47bc4601..439077dc189 100755 --- a/tests/integration_tests/changefeed_reconstruct/run.sh +++ b/tests/integration_tests/changefeed_reconstruct/run.sh @@ -40,7 +40,7 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" TOPIC_NAME="ticdc-changefeed-reconstruct-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac @@ -48,7 +48,7 @@ function run() { owner_pid=$(ps -C $CDC_BINARY -o pid= | awk '{print $1}') changefeed_id=$(cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql "CREATE DATABASE changefeed_reconstruct;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/cli/run.sh b/tests/integration_tests/cli/run.sh index ce07735d410..feebb2a24de 100644 --- a/tests/integration_tests/cli/run.sh +++ b/tests/integration_tests/cli/run.sh @@ -48,14 +48,14 @@ function run() { TOPIC_NAME="ticdc-cli-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac uuid="custom-changefeed-name" run_cdc_cli changefeed create --start-ts=$start_ts --sort-engine=memory --sink-uri="$SINK_URI" --tz="Asia/Shanghai" -c="$uuid" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_cdc_cli changefeed cyclic create-marktables \ @@ -151,7 +151,7 @@ EOF # Test Kafka SSL connection. 
if [ "$SINK_TYPE" == "kafka" ]; then SSL_TOPIC_NAME="ticdc-cli-test-ssl-$RANDOM" - SINK_URI="kafka://127.0.0.1:9093/$SSL_TOPIC_NAME?ca=${TLS_DIR}/ca.pem&cert=${TLS_DIR}/client.pem&key=${TLS_DIR}/client-key.pem&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" + SINK_URI="kafka://127.0.0.1:9093/$SSL_TOPIC_NAME?protocol=open-protocol&ca=${TLS_DIR}/ca.pem&cert=${TLS_DIR}/client.pem&key=${TLS_DIR}/client-key.pem&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" --tz="Asia/Shanghai" fi diff --git a/tests/integration_tests/clustered_index/run.sh b/tests/integration_tests/clustered_index/run.sh index f25ecc27ad0..b16ceb53314 100755 --- a/tests/integration_tests/clustered_index/run.sh +++ b/tests/integration_tests/clustered_index/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-clustered-index-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac cdc cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql "set global tidb_enable_clustered_index=1;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} # TiDB global variables cache 2 seconds at most diff --git a/tests/integration_tests/common_1/run.sh b/tests/integration_tests/common_1/run.sh index 07ebc1193a9..d7512d3121c 100644 --- a/tests/integration_tests/common_1/run.sh +++ b/tests/integration_tests/common_1/run.sh @@ -37,12 +37,12 @@ function run() { # can't use the normal user TOPIC_NAME="ticdc-common-1-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://root@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/test.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/consistent_replicate_nfs/run.sh b/tests/integration_tests/consistent_replicate_nfs/run.sh index 3219102055a..3ffaf788638 100644 --- a/tests/integration_tests/consistent_replicate_nfs/run.sh +++ b/tests/integration_tests/consistent_replicate_nfs/run.sh @@ -39,12 +39,12 @@ function run() { TOPIC_NAME="ticdc-sink-retry-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&max-message-bytes=102400&kafka-version=${KAFKA_VERSION}" ;; + kafka) 
SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&max-message-bytes=102400&kafka-version=${KAFKA_VERSION}" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac changefeed_id=$(cdc cli changefeed create --sink-uri="$SINK_URI" --config="$CUR/conf/changefeed.toml" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}" fi run_sql "CREATE DATABASE consistent_replicate_nfs;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/consistent_replicate_s3/run.sh b/tests/integration_tests/consistent_replicate_s3/run.sh index 653bb21101c..253244cfc8c 100644 --- a/tests/integration_tests/consistent_replicate_s3/run.sh +++ b/tests/integration_tests/consistent_replicate_s3/run.sh @@ -72,12 +72,12 @@ function run() { TOPIC_NAME="ticdc-sink-retry-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&max-message-bytes=102400&kafka-version=${KAFKA_VERSION}" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&max-message-bytes=102400&kafka-version=${KAFKA_VERSION}" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac changefeed_id=$(cdc cli changefeed create --sink-uri="$SINK_URI" --config="$CUR/conf/changefeed.toml" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}" fi run_sql "CREATE DATABASE consistent_replicate_s3;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/ddl_attributes/run.sh b/tests/integration_tests/ddl_attributes/run.sh index c1624d3e756..a8e8a7a65ab 100644 --- a/tests/integration_tests/ddl_attributes/run.sh +++ b/tests/integration_tests/ddl_attributes/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-ddl-attributes-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}" ;; *) SINK_URI="mysql://root@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}" fi run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} # sync_diff can't check non-exist table, so we check expected tables are created in downstream first diff --git a/tests/integration_tests/ddl_puller_lag/run.sh b/tests/integration_tests/ddl_puller_lag/run.sh index 8f9595d7e0a..959deee203c 100644 --- a/tests/integration_tests/ddl_puller_lag/run.sh +++ b/tests/integration_tests/ddl_puller_lag/run.sh @@ -25,12 +25,12 @@ function prepare() { TOPIC_NAME="ticdc-ddl-puller-lag-test-$RANDOM" case $SINK_TYPE in - kafka) 
SINK_URI="kafka+ssl://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-client-id=ddl_puller_lag&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka+ssl://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-client-id=ddl_puller_lag&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql+ssl://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi } diff --git a/tests/integration_tests/ddl_sequence/run.sh b/tests/integration_tests/ddl_sequence/run.sh index 8ab4b80e71f..07bb628ab9a 100644 --- a/tests/integration_tests/ddl_sequence/run.sh +++ b/tests/integration_tests/ddl_sequence/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-ddl-sequence-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} # sync_diff can't check non-exist table, so we check expected tables are created in downstream first diff --git a/tests/integration_tests/drop_many_tables/run.sh b/tests/integration_tests/drop_many_tables/run.sh index e5a70e53157..ccfcd6e6957 100644 --- a/tests/integration_tests/drop_many_tables/run.sh +++ b/tests/integration_tests/drop_many_tables/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-drop-tables-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} # sync_diff can't check non-exist table, so we check expected tables are created in downstream first diff --git a/tests/integration_tests/force_replicate_table/run.sh b/tests/integration_tests/force_replicate_table/run.sh index 0890524a943..fe2816e3bf2 100755 --- 
a/tests/integration_tests/force_replicate_table/run.sh +++ b/tests/integration_tests/force_replicate_table/run.sh @@ -59,12 +59,12 @@ function run() { TOPIC_NAME="ticdc-force_replicate_table-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?safe-mode=true" ;; esac cdc cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" --config $CUR/conf/changefeed.toml if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/test.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/gc_safepoint/run.sh b/tests/integration_tests/gc_safepoint/run.sh index fa28076e00b..46165f1195d 100755 --- a/tests/integration_tests/gc_safepoint/run.sh +++ b/tests/integration_tests/gc_safepoint/run.sh @@ -83,14 +83,14 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" TOPIC_NAME="ticdc-gc-safepoint-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac export GO_FAILPOINTS='github.com/pingcap/tiflow/pkg/txnutil/gc/InjectGcSafepointUpdateInterval=return(500)' run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --pd $pd_addr changefeed_id=$(cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi clear_gc_worker_safepoint $pd_addr $pd_cluster_id diff --git a/tests/integration_tests/generate_column/run.sh b/tests/integration_tests/generate_column/run.sh index e701b123bc5..6ddbc25e246 100644 --- a/tests/integration_tests/generate_column/run.sh +++ b/tests/integration_tests/generate_column/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-generate-column-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR
"kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} # sync_diff can't check non-exist table, so we check expected tables are created in downstream first diff --git a/tests/integration_tests/kafka_messages/run.sh b/tests/integration_tests/kafka_messages/run.sh index 9f952728c9d..1fb75b5adfa 100755 --- a/tests/integration_tests/kafka_messages/run.sh +++ b/tests/integration_tests/kafka_messages/run.sh @@ -31,10 +31,10 @@ function run_length_limit() { run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --loglevel "info" TOPIC_NAME="ticdc-kafka-message-test-$RANDOM" - SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" + SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi # Add a check table to reduce check time, or if we check data with sync diff @@ -87,10 +87,10 @@ function run_batch_size_limit() { run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --loglevel "info" TOPIC_NAME="ticdc-kafka-message-test-$RANDOM" - SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&max-batch-size=3&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" + SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&max-batch-size=3&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760&max-batch-size=3" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760&max-batch-size=3" fi # Add a check table to reduce check time, or if we check data with sync diff diff --git a/tests/integration_tests/kafka_sink_error_resume/run.sh b/tests/integration_tests/kafka_sink_error_resume/run.sh index 1020d5cd233..0560852fa6b 100755 --- a/tests/integration_tests/kafka_sink_error_resume/run.sh +++ b/tests/integration_tests/kafka_sink_error_resume/run.sh @@ -37,12 +37,12 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" TOPIC_NAME="ticdc-kafka-sink-error-resume-test-$RANDOM" - SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" + SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" export GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/sink/producer/kafka/KafkaSinkAsyncSendError=4*return(true)' run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --pd $pd_addr changefeed_id=$(cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') - run_kafka_consumer $WORK_DIR 
"kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" run_sql "CREATE DATABASE kafka_sink_error_resume;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} run_sql "CREATE table kafka_sink_error_resume.t1(id int primary key auto_increment, val int);" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/kv_client_stream_reconnect/run.sh b/tests/integration_tests/kv_client_stream_reconnect/run.sh index ab39329419e..ac01e967428 100644 --- a/tests/integration_tests/kv_client_stream_reconnect/run.sh +++ b/tests/integration_tests/kv_client_stream_reconnect/run.sh @@ -19,7 +19,7 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" TOPIC_NAME="kv-client-stream-reconnect-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac @@ -28,7 +28,7 @@ function run() { run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --pd $pd_addr changefeed_id=$(cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql "CREATE DATABASE kv_client_stream_reconnect;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/many_pk_or_uk/run.sh b/tests/integration_tests/many_pk_or_uk/run.sh index cec30b7acc8..e1da4c9f4d4 100755 --- a/tests/integration_tests/many_pk_or_uk/run.sh +++ b/tests/integration_tests/many_pk_or_uk/run.sh @@ -23,12 +23,12 @@ function prepare() { TOPIC_NAME="ticdc-many-pk-or-uk-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi } diff --git a/tests/integration_tests/move_table/run.sh b/tests/integration_tests/move_table/run.sh index 366c8074064..edd0b005662 100644 --- a/tests/integration_tests/move_table/run.sh +++ b/tests/integration_tests/move_table/run.sh @@ -27,13 +27,13 @@ function run() { TOPIC_NAME="ticdc-move-table-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) 
SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --loglevel "debug" --logsuffix "2" --addr 127.0.0.1:8301 diff --git a/tests/integration_tests/multi_capture/run.sh b/tests/integration_tests/multi_capture/run.sh index 965cddbe251..6b54c9186e7 100755 --- a/tests/integration_tests/multi_capture/run.sh +++ b/tests/integration_tests/multi_capture/run.sh @@ -35,12 +35,12 @@ function run() { TOPIC_NAME="ticdc-multi-capture-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi # check tables are created and data is synchronized diff --git a/tests/integration_tests/multi_source/run.sh b/tests/integration_tests/multi_source/run.sh index e0442983b64..1c38616dfb6 100755 --- a/tests/integration_tests/multi_source/run.sh +++ b/tests/integration_tests/multi_source/run.sh @@ -23,12 +23,12 @@ function prepare() { TOPIC_NAME="ticdc-multi-source-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi } diff --git a/tests/integration_tests/new_ci_collation_with_old_value/run.sh b/tests/integration_tests/new_ci_collation_with_old_value/run.sh index ff857f9365a..8df0ddf9a99 100755 --- a/tests/integration_tests/new_ci_collation_with_old_value/run.sh +++ b/tests/integration_tests/new_ci_collation_with_old_value/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-new_ci_collation_with_old_value-test-$RANDOM" case $SINK_TYPE in - kafka) 
SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?safe-mode=true" ;; esac cdc cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" --config $CUR/conf/changefeed.toml if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/test1.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/new_ci_collation_without_old_value/run.sh b/tests/integration_tests/new_ci_collation_without_old_value/run.sh index f9551b4ff88..fbc0fd3f55c 100755 --- a/tests/integration_tests/new_ci_collation_without_old_value/run.sh +++ b/tests/integration_tests/new_ci_collation_without_old_value/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-new_ci_collation_without_old_value-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?safe-mode=true" ;; esac cdc cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" --config $CUR/conf/changefeed.toml if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/test1.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/partition_table/run.sh b/tests/integration_tests/partition_table/run.sh index 86a80e26e67..9d7a5592742 100644 --- a/tests/integration_tests/partition_table/run.sh +++ b/tests/integration_tests/partition_table/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-partition-table-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT} # sync_diff can't check non-exist table, so we check expected tables are created in downstream first diff --git a/tests/integration_tests/processor_err_chan/run.sh b/tests/integration_tests/processor_err_chan/run.sh index 7e9ca97176c..ec5025f3d13 
100644 --- a/tests/integration_tests/processor_err_chan/run.sh +++ b/tests/integration_tests/processor_err_chan/run.sh @@ -36,7 +36,7 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" TOPIC_NAME="ticdc-processor-err-chan-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac @@ -52,7 +52,7 @@ function run() { changefeed_id=$(cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi retry_time=10 diff --git a/tests/integration_tests/processor_panic/run.sh b/tests/integration_tests/processor_panic/run.sh index 20f9aa6fd8b..c1724b41b9f 100644 --- a/tests/integration_tests/processor_panic/run.sh +++ b/tests/integration_tests/processor_panic/run.sh @@ -25,12 +25,12 @@ function prepare() { TOPIC_NAME="ticdc-processor-panic-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-client-id=cdc_test_processor_panic&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-client-id=cdc_test_processor_panic&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi } diff --git a/tests/integration_tests/processor_resolved_ts_fallback/run.sh b/tests/integration_tests/processor_resolved_ts_fallback/run.sh index b99754b42d6..4c3504911fe 100755 --- a/tests/integration_tests/processor_resolved_ts_fallback/run.sh +++ b/tests/integration_tests/processor_resolved_ts_fallback/run.sh @@ -28,7 +28,7 @@ function run() { esac run_cdc_cli changefeed create --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql "CREATE database processor_resolved_ts_fallback;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/processor_stop_delay/run.sh b/tests/integration_tests/processor_stop_delay/run.sh index 89d23b963fb..9240ec9a4e5 100644 --- a/tests/integration_tests/processor_stop_delay/run.sh +++ b/tests/integration_tests/processor_stop_delay/run.sh @@ -17,7 +17,7 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" 
TOPIC_NAME="ticdc-processor-stop-delay-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac export GO_FAILPOINTS='github.com/pingcap/tiflow/cdc/processor/processorStopDelay=1*sleep(10000)' @@ -25,7 +25,7 @@ function run() { run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --pd $pd_addr changefeed_id=$(cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql "CREATE DATABASE processor_stop_delay;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/region_merge/run.sh b/tests/integration_tests/region_merge/run.sh index eb365e6347b..7d2e57bf0dc 100644 --- a/tests/integration_tests/region_merge/run.sh +++ b/tests/integration_tests/region_merge/run.sh @@ -37,12 +37,12 @@ function run() { run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY TOPIC_NAME="ticdc-region-merge-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi # set max_execution_time to 30s, because split region could block even region has been split. 
diff --git a/tests/integration_tests/resolve_lock/run.sh b/tests/integration_tests/resolve_lock/run.sh index b02f989b57c..84aa2901d58 100755 --- a/tests/integration_tests/resolve_lock/run.sh +++ b/tests/integration_tests/resolve_lock/run.sh @@ -23,12 +23,12 @@ function prepare() { TOPIC_NAME="ticdc-resolve-lock-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/tidb-txn-mode=pessimistic" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi } diff --git a/tests/integration_tests/row_format/data/step3.sql b/tests/integration_tests/row_format/data/step3.sql index c6d822392a0..5e90c68d122 100644 --- a/tests/integration_tests/row_format/data/step3.sql +++ b/tests/integration_tests/row_format/data/step3.sql @@ -191,13 +191,13 @@ insert into tp_real(c_float, c_double, c_decimal, c_decimal_2) values (-2.7182818284, -3.1415926, -8000, -179394.233); create table tp_unsigned_real ( - id int auto_increment, - c_unsigned_float float unsigned null, - c_unsigned_double double unsigned null, - c_unsigned_decimal decimal unsigned null, - c_unsigned_decimal_2 decimal(10, 4) unsigned null, - constraint pk - primary key (id) + id int auto_increment, + c_unsigned_float float unsigned null, + c_unsigned_double double unsigned null, + c_unsigned_decimal decimal unsigned null, + c_unsigned_decimal_2 decimal(10, 4) unsigned null, + constraint pk + primary key (id) ); insert into tp_unsigned_real() diff --git a/tests/integration_tests/row_format/run.sh b/tests/integration_tests/row_format/run.sh index 67ed3ef06d7..9c7704d52d7 100644 --- a/tests/integration_tests/row_format/run.sh +++ b/tests/integration_tests/row_format/run.sh @@ -22,12 +22,12 @@ function run() { TOPIC_NAME="ticdc-row-format-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql "SET GLOBAL tidb_row_format_version = 1;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/simple/run.sh b/tests/integration_tests/simple/run.sh index bb390e0dbb2..f629477a7d5 100644 --- a/tests/integration_tests/simple/run.sh +++ b/tests/integration_tests/simple/run.sh @@ -25,12 +25,12 @@ function prepare() { TOPIC_NAME="ticdc-simple-test-$RANDOM" case 
$SINK_TYPE in - kafka) SINK_URI="kafka+ssl://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-client-id=cdc_test_simple&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka+ssl://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-client-id=cdc_test_simple&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql+ssl://normal:123456@127.0.0.1:3306/" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi } diff --git a/tests/integration_tests/sink_hang/run.sh b/tests/integration_tests/sink_hang/run.sh index a75689e101d..aa7f675e378 100644 --- a/tests/integration_tests/sink_hang/run.sh +++ b/tests/integration_tests/sink_hang/run.sh @@ -38,7 +38,7 @@ function run() { pd_addr="http://$UP_PD_HOST_1:$UP_PD_PORT_1" TOPIC_NAME="ticdc-sink-hang-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac @@ -46,7 +46,7 @@ function run() { run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY --addr "127.0.0.1:8300" --pd $pd_addr changefeed_id=$(cdc cli changefeed create --pd=$pd_addr --sink-uri="$SINK_URI" 2>&1 | tail -n2 | head -n1 | awk '{print $2}') if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi run_sql "CREATE DATABASE sink_hang;" ${UP_TIDB_HOST} ${UP_TIDB_PORT} diff --git a/tests/integration_tests/sink_retry/run.sh b/tests/integration_tests/sink_retry/run.sh index 88b0219257a..6120f718404 100755 --- a/tests/integration_tests/sink_retry/run.sh +++ b/tests/integration_tests/sink_retry/run.sh @@ -26,12 +26,12 @@ function run() { TOPIC_NAME="ticdc-sink-retry-test-$RANDOM" case $SINK_TYPE in - kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; + kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;; *) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;; esac run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" if [ "$SINK_TYPE" == "kafka" ]; then - run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" + run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760" fi check_table_exists "sink_retry.USERTABLE" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT} diff --git a/tests/integration_tests/sorter/run.sh b/tests/integration_tests/sorter/run.sh index 1020e4d7f98..a6e4d519b6d 100755 --- 
diff --git a/tests/integration_tests/sorter/run.sh b/tests/integration_tests/sorter/run.sh
index 1020e4d7f98..a6e4d519b6d 100755
--- a/tests/integration_tests/sorter/run.sh
+++ b/tests/integration_tests/sorter/run.sh
@@ -65,12 +65,12 @@ function run() {
 	TOPIC_NAME="ticdc-unified-sorter-test-$RANDOM"
 	CF_NAME=$TOPIC_NAME
 	case $SINK_TYPE in
-	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
+	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
 	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;;
 	esac
 	run_cdc_cli changefeed create -c $CF_NAME --start-ts=$start_ts --sink-uri="$SINK_URI" --sort-engine="unified"
 	if [ "$SINK_TYPE" == "kafka" ]; then
-		run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760"
+		run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760"
 	fi
 
 	run_test
@@ -88,12 +88,12 @@ function run() {
 	TOPIC_NAME="ticdc-leveldb-sorter-test-$RANDOM"
 	CF_NAME=$TOPIC_NAME
 	case $SINK_TYPE in
-	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
+	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
 	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/?max-txn-row=1" ;;
 	esac
 	run_cdc_cli changefeed create -c $CF_NAME --start-ts=$start_ts --sink-uri="$SINK_URI" --sort-engine="unified"
 	if [ "$SINK_TYPE" == "kafka" ]; then
-		run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760"
+		run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760"
 	fi
 
 	run_test
diff --git a/tests/integration_tests/split_region/run.sh b/tests/integration_tests/split_region/run.sh
index 33b86ec2333..1e241b15d12 100755
--- a/tests/integration_tests/split_region/run.sh
+++ b/tests/integration_tests/split_region/run.sh
@@ -24,12 +24,12 @@ function run() {
 	TOPIC_NAME="ticdc-split-region-test-$RANDOM"
 	case $SINK_TYPE in
-	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
+	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
 	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;;
 	esac
 	run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI"
 	if [ "$SINK_TYPE" == "kafka" ]; then
-		run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760"
+		run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760"
 	fi
 
 	# sync_diff can't check non-exist table, so we check expected tables are created in downstream first
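In each script the changefeed's sink URI and the URI handed to run_kafka_consumer must agree on the protocol, which is why both lines are updated together. A hedged sketch of such a consistency check (hypothetical helper, not part of the test harness; same net/url usage as the sketch above):

	// sameProtocol reports whether two sink URIs declare the same
	// "protocol" query parameter.
	func sameProtocol(a, b string) (bool, error) {
		ua, err := url.Parse(a)
		if err != nil {
			return false, err
		}
		ub, err := url.Parse(b)
		if err != nil {
			return false, err
		}
		return ua.Query().Get("protocol") == ub.Query().Get("protocol"), nil
	}

For any of the updated scripts, sameProtocol over the changefeed URI and the consumer URI returns true, since both now carry protocol=open-protocol.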
diff --git a/tests/integration_tests/tiflash/run.sh b/tests/integration_tests/tiflash/run.sh
index 1f14ee354ed..58d6d90c69a 100644
--- a/tests/integration_tests/tiflash/run.sh
+++ b/tests/integration_tests/tiflash/run.sh
@@ -22,12 +22,12 @@ function run() {
 	TOPIC_NAME="ticdc-tiflash-test-$RANDOM"
 	case $SINK_TYPE in
-	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
+	kafka) SINK_URI="kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&kafka-version=${KAFKA_VERSION}&max-message-bytes=10485760" ;;
 	*) SINK_URI="mysql://normal:123456@127.0.0.1:3306/" ;;
 	esac
 	run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI"
 	if [ "$SINK_TYPE" == "kafka" ]; then
-		run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760"
+		run_kafka_consumer $WORK_DIR "kafka://127.0.0.1:9092/$TOPIC_NAME?protocol=open-protocol&partition-num=4&version=${KAFKA_VERSION}&max-message-bytes=10485760"
 	fi
 	run_sql_file $CUR/data/prepare.sql ${UP_TIDB_HOST} ${UP_TIDB_PORT}
 	# sync_diff can't check non-exist table, so we check expected tables are created in downstream first