Skip to content

Commit

Permalink
Switch imports for forked Pilosa and fix various lint issues (#1378)
Browse files Browse the repository at this point in the history
  • Loading branch information
richardartoul authored Feb 14, 2019
1 parent 3b5d849 commit a5e1a27
Show file tree
Hide file tree
Showing 51 changed files with 115 additions and 218 deletions.
54 changes: 30 additions & 24 deletions glide.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 1 addition & 2 deletions glide.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,7 @@ import:

# NB(r): make sure to use the master commit for pilosa
# once all upstream changes are complete in github.com/pilosa/pilosa.
- package: github.com/pilosa/pilosa/roaring
repo: https://github.com/m3db/pilosa
- package: github.com/m3db/pilosa/roaring
version: ac8920c6e1abe06e2b0a3deba79a9910c39700e6

# NB(prateek): ideally, the following dependencies would be under testImport, but
Expand Down
6 changes: 0 additions & 6 deletions src/aggregator/aggregator/handler/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,16 +36,10 @@ import (
"github.com/m3db/m3x/pool"
)

const (
initialBufferSizeGrowthFactor = 2
)

var (
errNoHandlerConfiguration = errors.New("no handler configuration")
errNoWriterConfiguration = errors.New("no writer configuration")
errNoDynamicOrStaticBackendConfiguration = errors.New("neither dynamic nor static backend was configured")
errBothDynamicAndStaticBackendConfiguration = errors.New("both dynamic and static backend were configured")
errInvalidShardingConfiguration = errors.New("invalid sharding configuration, missing hash type or total shards")
)

// FlushHandlerConfiguration configures flush handlers.
Expand Down
6 changes: 0 additions & 6 deletions src/aggregator/aggregator/handler/writer/protobuf_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -204,12 +204,6 @@ func testProtobufWriter(t *testing.T, ctrl *gomock.Controller, opts Options) *pr
return NewProtobufWriter(p, sharding.Murmur32Hash.MustShardFn(), opts).(*protobufWriter)
}

type encodeData struct {
aggregated.ChunkedMetricWithStoragePolicy

encodedAtNanos int64
}

type decodeData struct {
aggregated.MetricWithStoragePolicy

Expand Down
4 changes: 2 additions & 2 deletions src/cluster/placement/algo/sharded_helper.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,13 +97,13 @@ type helper struct {
shardToInstanceMap map[uint32]map[placement.Instance]struct{}
groupToInstancesMap map[string]map[placement.Instance]struct{}
groupToWeightMap map[string]uint32
totalWeight uint32
rf int
uniqueShards []uint32
instances map[string]placement.Instance
maxShardSetID uint32
log log.Logger
opts placement.Options
totalWeight uint32
maxShardSetID uint32
}

// NewPlacementHelper returns a placement helper
Expand Down
12 changes: 6 additions & 6 deletions src/cluster/placement/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,22 +57,22 @@ func defaultTimeNanosFn() int64 { return shard.UnInitializedV
func defaultShardValidationFn(s shard.Shard) error { return nil }

type options struct {
allowPartialReplace bool
addAllCandidates bool
shardStateMode ShardStateMode
isSharded bool
isMirrored bool
isStaged bool
iopts instrument.Options
validZone string
dryrun bool
placementCutOverFn TimeNanosFn
shardCutOverFn TimeNanosFn
shardCutOffFn TimeNanosFn
isShardCutoverFn ShardValidateFn
isShardCutoffFn ShardValidateFn
validateFn ValidateFn
nowFn clock.NowFn
allowPartialReplace bool
addAllCandidates bool
dryrun bool
isSharded bool
isMirrored bool
isStaged bool
}

// NewOptions returns a default Options.
Expand Down
10 changes: 5 additions & 5 deletions src/cluster/placement/placement.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,11 +49,11 @@ type placement struct {
instancesByShard map[uint32][]Instance
rf int
shards []uint32
isSharded bool
isMirrored bool
cutoverNanos int64
maxShardSetID uint32
version int
maxShardSetID uint32
isSharded bool
isMirrored bool
}

// NewPlacement returns a ServicePlacement
Expand Down Expand Up @@ -446,11 +446,11 @@ type instance struct {
id string
isolationGroup string
zone string
weight uint32
endpoint string
hostname string
port uint32
shards shard.Shards
port uint32
weight uint32
shardSetID uint32
}

Expand Down
4 changes: 2 additions & 2 deletions src/cmd/services/m3coordinator/downsample/flush_handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,8 +139,8 @@ func (w *downsamplerFlushHandlerWriter) Write(

// NB(r): Quite gross, need to actually make it possible to plumb this
// through for each metric.
if bytes.Compare(name, MetricsOptionIDSchemeTagName) == 0 {
if bytes.Compare(value, GraphiteIDSchemeTagValue) == 0 &&
if bytes.Equal(name, MetricsOptionIDSchemeTagName) {
if bytes.Equal(value, GraphiteIDSchemeTagValue) &&
tags.Opts.IDSchemeType() != models.TypeGraphite {
iter.Reset(mp.ChunkedID.Data)
tags.Opts = w.tagOptions.SetIDSchemeType(models.TypeGraphite)
Expand Down
2 changes: 1 addition & 1 deletion src/cmd/services/m3coordinator/downsample/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ var (
return maxBufferPast
}

return time.Duration(value)
return value
}

errNoStorage = errors.New("dynamic downsampling enabled with storage not set")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ func (a *samplesAppender) AppendGaugeTimedSample(t time.Time, value float64) err
Type: metric.GaugeType,
ID: a.unownedID,
TimeNanos: t.UnixNano(),
Value: float64(value),
Value: value,
})
}

Expand Down
4 changes: 2 additions & 2 deletions src/cmd/services/m3coordinator/ingest/carbon/ingest.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ func NewIngester(
resourcePool.Init(func() interface{} {
return &lineResources{
name: make([]byte, 0, maxResourcePoolNameSize),
datapoints: make([]ts.Datapoint, 1, 1),
datapoints: make([]ts.Datapoint, 1),
tags: make([]models.Tag, 0, maxPooledTagsSize),
}
})
Expand Down Expand Up @@ -224,7 +224,7 @@ func (i *ingester) write(
if i.opts.Debug {
i.logger.Infof(
"carbon metric: %s matched by pattern: %s with mapping rules: %#v and storage policies: %#v",
string(resources.name), string(rule.rule.Pattern), rule.mappingRules, rule.storagePolicies)
string(resources.name), rule.rule.Pattern, rule.mappingRules, rule.storagePolicies)
}
// Break because we only want to apply one rule per metric based on which
// ever one matches first.
Expand Down
6 changes: 0 additions & 6 deletions src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -284,21 +284,18 @@ func TestIngesterHonorsPatterns(t *testing.T) {
tags: mustGenerateTagsFromName(t, []byte("foo.match-regex1.bar.baz")),
timestamp: 1,
value: 1,
isValid: true,
},
{
metric: []byte("foo.match-regex2.bar.baz"),
tags: mustGenerateTagsFromName(t, []byte("foo.match-regex2.bar.baz")),
timestamp: 2,
value: 2,
isValid: true,
},
{
metric: []byte("foo.match-regex3.bar.baz"),
tags: mustGenerateTagsFromName(t, []byte("foo.match-regex3.bar.baz")),
timestamp: 3,
value: 3,
isValid: true,
},
}, found)
}
Expand Down Expand Up @@ -409,7 +406,6 @@ type testMetric struct {
tags models.Tags
timestamp int
value float64
isValid bool
}

func assertTestMetricsAreEqual(t *testing.T, a, b []testMetric) {
Expand Down Expand Up @@ -484,9 +480,7 @@ var (
falseVar = false
falsePtr = &falseVar
aggregateMean = aggregation.Mean
aggregateSum = aggregation.Sum
aggregateLast = aggregation.Last
aggregateMeanPtr = &aggregateMean
aggregateSumPtr = &aggregateSum
aggregateLastPtr = &aggregateLast
)
12 changes: 4 additions & 8 deletions src/cmd/services/m3coordinator/ingest/write.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,11 +66,11 @@ type DownsamplerAndWriter interface {
// WriteOptions contains overrides for the downsampling mapping
// rules and storage policies for a given write.
type WriteOptions struct {
DownsampleOverride bool
DownsampleMappingRules []downsample.MappingRule
WriteStoragePolicies []policy.StoragePolicy

WriteOverride bool
WriteStoragePolicies []policy.StoragePolicy
DownsampleOverride bool
WriteOverride bool
}

// downsamplerAndWriter encapsulates the logic for writing data to the downsampler,
Expand Down Expand Up @@ -327,11 +327,7 @@ func (d *downsamplerAndWriter) writeAggregatedBatch(
}
appender.Finalize()

if err := iter.Error(); err != nil {
return err
}

return nil
return iter.Error()
}

func (d *downsamplerAndWriter) Storage() storage.Storage {
Expand Down
6 changes: 1 addition & 5 deletions src/cmd/services/m3dbnode/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,11 +61,7 @@ type Configuration struct {
// InitDefaultsAndValidate initializes all default values and validates the Configuration.
// We use this method to validate fields where the validator package falls short.
func (c *Configuration) InitDefaultsAndValidate() error {
if err := c.DB.InitDefaultsAndValidate(); err != nil {
return err
}

return nil
return c.DB.InitDefaultsAndValidate()
}

// DBConfiguration is the configuration for a DB node.
Expand Down
Loading

0 comments on commit a5e1a27

Please sign in to comment.