Skip to content

Commit

Permalink
Added lint rule for checking exported methods (#571)
Browse files Browse the repository at this point in the history
* Added lint rule for checking exported methods

Signed-off-by: Corbin Phelps <[email protected]>

* Fixed uncommented journald

Signed-off-by: Corbin Phelps <[email protected]>
  • Loading branch information
Corbin Phelps authored Feb 23, 2022
1 parent 9ddb722 commit cd2849f
Show file tree
Hide file tree
Showing 20 changed files with 45 additions and 7 deletions.
3 changes: 3 additions & 0 deletions operator/buffer/buffer.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,14 +71,17 @@ func (bc *Config) unmarshal(unmarshal func(interface{}) error) error {
}
}

// MarshalYAML implements yaml.Marshaler: the Config marshals as its
// Builder field, so the wrapper adds no extra YAML nesting.
func (bc Config) MarshalYAML() (interface{}, error) {
return bc.Builder, nil
}

// MarshalJSON implements json.Marshaler: the Config marshals as its
// Builder field, so the wrapper adds no extra JSON nesting.
func (bc Config) MarshalJSON() ([]byte, error) {
return json.Marshal(bc.Builder)
}

// Clearer is an interface that is responsible for clearing entries from a buffer
type Clearer interface {
MarkAllAsFlushed() error
MarkRangeAsFlushed(uint, uint) error
Expand Down
4 changes: 4 additions & 0 deletions operator/buffer/disk.go
Original file line number Diff line number Diff line change
Expand Up @@ -316,24 +316,28 @@ func (d *DiskBuffer) Read(dst []*entry.Entry) (f Clearer, i int, err error) {
return d.newClearer(newRead), readCount, nil
}

// MaxChunkSize returns the maximum chunk size.
// The read is guarded by reconfigMutex, so it is safe for concurrent use.
func (d *DiskBuffer) MaxChunkSize() uint {
d.reconfigMutex.RLock()
defer d.reconfigMutex.RUnlock()
return d.maxChunkSize
}

// MaxChunkDelay returns the maximum chunk delay.
// The read is guarded by reconfigMutex, so it is safe for concurrent use.
func (d *DiskBuffer) MaxChunkDelay() time.Duration {
d.reconfigMutex.RLock()
defer d.reconfigMutex.RUnlock()
return d.maxChunkDelay
}

// SetMaxChunkSize sets the maximum chunk size.
// The write is guarded by reconfigMutex, so it is safe for concurrent use.
func (d *DiskBuffer) SetMaxChunkSize(size uint) {
d.reconfigMutex.Lock()
d.maxChunkSize = size
d.reconfigMutex.Unlock()
}

// SetMaxChunkDelay sets the max chunk delay
func (d *DiskBuffer) SetMaxChunkDelay(delay time.Duration) {
d.reconfigMutex.Lock()
d.maxChunkDelay = delay
Expand Down
4 changes: 4 additions & 0 deletions operator/buffer/memory.go
Original file line number Diff line number Diff line change
Expand Up @@ -146,24 +146,28 @@ func (m *MemoryBuffer) ReadWait(ctx context.Context, dst []*entry.Entry) (Cleare
return m.newClearer(inFlightIDs[:i]), i, nil
}

// MaxChunkSize returns the maximum chunk size.
// The read is guarded by reconfigMutex, so it is safe for concurrent use.
func (m *MemoryBuffer) MaxChunkSize() uint {
m.reconfigMutex.RLock()
defer m.reconfigMutex.RUnlock()
return m.maxChunkSize
}

// MaxChunkDelay returns the maximum chunk delay.
// The read is guarded by reconfigMutex, so it is safe for concurrent use.
func (m *MemoryBuffer) MaxChunkDelay() time.Duration {
m.reconfigMutex.RLock()
defer m.reconfigMutex.RUnlock()
return m.maxChunkDelay
}

// SetMaxChunkSize sets the maximum chunk size.
// The write is guarded by reconfigMutex, so it is safe for concurrent use.
func (m *MemoryBuffer) SetMaxChunkSize(size uint) {
m.reconfigMutex.Lock()
m.maxChunkSize = size
m.reconfigMutex.Unlock()
}

// SetMaxChunkDelay sets the max chunk delay
func (m *MemoryBuffer) SetMaxChunkDelay(delay time.Duration) {
m.reconfigMutex.Lock()
m.maxChunkDelay = delay
Expand Down
5 changes: 3 additions & 2 deletions operator/builtin/input/aws/cloudwatch/cloudwatch_persist.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,12 @@ import (
"github.com/observiq/stanza/operator/helper"
)

// Persister ensures data is persisted across shutdowns by reading and
// writing values through the wrapped helper.Persister database handle.
type Persister struct {
// DB is the underlying persistence backend.
DB helper.Persister
}

// Helper function to get persisted data
// Read is a helper function to get persisted data
func (p *Persister) Read(key string) (int64, error) {
var startTime int64
buffer := bytes.NewBuffer(p.DB.Get(key))
Expand All @@ -22,7 +23,7 @@ func (p *Persister) Read(key string) (int64, error) {
return startTime, nil
}

// Helper function to set persisted data
// Write is a helper function to set persisted data
func (p *Persister) Write(key string, value int64) {
var buf = make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(value))
Expand Down
2 changes: 1 addition & 1 deletion operator/builtin/input/azure/event_hub.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ import (
"go.uber.org/zap"
)

// Eventhub provides methods for reading events from Azure Event Hub.
// EventHub provides methods for reading events from Azure Event Hub.
type EventHub struct {
AzureConfig
Persist *Persister
Expand Down
1 change: 1 addition & 0 deletions operator/builtin/input/azure/event_hub_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ type AzureConfig struct {
startAtBeginning bool
}

// Build will build an Azure EventHub input operator
func (a *AzureConfig) Build(buildContext operator.BuildContext, input helper.InputConfig) error {
inputOperator, err := input.Build(buildContext)
if err != nil {
Expand Down
1 change: 1 addition & 0 deletions operator/builtin/input/file/finder.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ import (
"github.com/bmatcuk/doublestar/v3"
)

// Finder is responsible for finding files according to the include and exclude rules.
type Finder struct {
Include []string `mapstructure:"include,omitempty" json:"include,omitempty" yaml:"include,omitempty"`
Exclude []string `mapstructure:"exclude,omitempty" json:"exclude,omitempty" yaml:"exclude,omitempty"`
Expand Down
7 changes: 6 additions & 1 deletion operator/builtin/input/http/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,13 @@ import (
)

const (
DefaultTimeout = time.Second * 20
// DefaultTimeout is the default timeout for reads and writes
DefaultTimeout = time.Second * 20

// DefaultIdleTimeout is the default idle timeout.
DefaultIdleTimeout = time.Second * 60

// DefaultMaxBodySize is the default maximum body size.
DefaultMaxBodySize = 10000000 // 10 megabyte
)

Expand Down
2 changes: 2 additions & 0 deletions operator/builtin/input/journald/journald.go
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
//go:build linux
// +build linux

package journald
Expand Down Expand Up @@ -25,6 +26,7 @@ func init() {
operator.Register("journald_input", func() operator.Builder { return NewJournaldInputConfig("") })
}

// NewJournaldInputConfig creates a new config for Journald Input
func NewJournaldInputConfig(operatorID string) *JournaldInputConfig {
return &JournaldInputConfig{
InputConfig: helper.NewInputConfig(operatorID, "journald_input"),
Expand Down
1 change: 1 addition & 0 deletions operator/builtin/parser/csv/csv.go
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ func (r *CSVParser) Process(ctx context.Context, e *entry.Entry) error {
return r.ParserOperator.ProcessWith(ctx, e, r.parse)
}

// ParseFunc parses a raw log entry value, returning the parsed form or an
// error when the value cannot be parsed as CSV.
type ParseFunc func(interface{}) (interface{}, error)

// generateParseFunc returns a parse function for a given header, allowing
Expand Down
3 changes: 3 additions & 0 deletions operator/builtin/transformer/recombine/recombine.go
Original file line number Diff line number Diff line change
Expand Up @@ -109,10 +109,12 @@ type RecombineOperator struct {
batch []*entry.Entry
}

// Start implements operator startup. It is a no-op: the recombine operator
// requires no background work before processing entries.
func (r *RecombineOperator) Start() error {
return nil
}

// Stop will stop processing log entries
func (r *RecombineOperator) Stop() error {
r.Lock()
defer r.Unlock()
Expand All @@ -123,6 +125,7 @@ func (r *RecombineOperator) Stop() error {
return nil
}

// Process processes a log entry to be combined
func (r *RecombineOperator) Process(ctx context.Context, e *entry.Entry) error {
// Lock the recombine operator because process can't run concurrently
r.Lock()
Expand Down
3 changes: 3 additions & 0 deletions operator/helper/bytesize.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,14 +8,17 @@ import (
"strings"
)

// ByteSize represents a size in bytes.
type ByteSize int64

// UnmarshalJSON implements json.Unmarshaler, delegating to the shared
// unmarshal logic with a JSON-specific decode callback.
func (h *ByteSize) UnmarshalJSON(raw []byte) error {
return h.unmarshalShared(func(i interface{}) error {
return json.Unmarshal(raw, &i)
})
}

// UnmarshalYAML implements yaml.Unmarshaler, delegating directly to the
// shared unmarshal logic.
func (h *ByteSize) UnmarshalYAML(unmarshal func(interface{}) error) error {
return h.unmarshalShared(unmarshal)
}
Expand Down
1 change: 1 addition & 0 deletions operator/helper/encoding.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ func (c EncodingConfig) Build(_ operator.BuildContext) (Encoding, error) {
}, nil
}

// Encoding wraps a text encoding.
type Encoding struct {
// Encoding is the underlying character encoding.
Encoding encoding.Encoding
}
Expand Down
2 changes: 1 addition & 1 deletion operator/helper/multiline.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ import (
"golang.org/x/text/encoding"
)

// NewBasicConfig creates a new Multiline config
// NewMultilineConfig creates a new Multiline config
func NewMultilineConfig() MultilineConfig {
return MultilineConfig{
LineStartPattern: "",
Expand Down
1 change: 1 addition & 0 deletions operator/helper/parser.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@ func (p *ParserOperator) ProcessWith(ctx context.Context, entry *entry.Entry, pa
return p.ProcessWithCallback(ctx, entry, parse, nil)
}

// ProcessWithCallback processes an entry and executes the callback.
func (p *ParserOperator) ProcessWithCallback(ctx context.Context, entry *entry.Entry, parse ParseFunction, cb func(*entry.Entry) error) error {
// Short circuit if the "if" condition does not match
skip, err := p.Skip(ctx, entry)
Expand Down
1 change: 1 addition & 0 deletions operator/helper/transformer.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@ func (t *TransformerOperator) HandleEntryError(ctx context.Context, entry *entry
return err
}

// Skip reports whether the entry should be skipped because it does not match the operator's "if" expression.
func (t *TransformerOperator) Skip(_ context.Context, entry *entry.Entry) (bool, error) {
if t.IfExpr == nil {
return false, nil
Expand Down
1 change: 1 addition & 0 deletions operator/helper/writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,7 @@ func (w *WriterOperator) findOperator(operators []operator.Operator, operatorID
// OutputIDs is a collection of operator IDs used as outputs.
type OutputIDs []string

// WithNamespace adds namespace
func (o OutputIDs) WithNamespace(bc operator.BuildContext) OutputIDs {
namespacedIDs := make([]string, 0, len(o))
for _, id := range o {
Expand Down
3 changes: 2 additions & 1 deletion plugin/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ type Config struct {
Parameters map[string]interface{} `json:",squash" yaml:",squash"`
}

// BuildMulti implements operator.MultiBuilder
// Build implements operator.MultiBuilder
func (c *Config) Build(bc operator.BuildContext) ([]operator.Operator, error) {
if bc.PluginDepth > 10 {
return nil, errors.NewError("reached max plugin depth", "ensure that there are no recursive dependencies in plugins")
Expand Down Expand Up @@ -71,6 +71,7 @@ func (c *Config) yamlOutputs(bc operator.BuildContext) string {
return fmt.Sprintf("[%s]", strings.Join(namespacedOutputs, ","))
}

// UnmarshalJSON unmarshals JSON
func (c *Config) UnmarshalJSON(raw []byte) error {
var m map[string]interface{}
if err := json.Unmarshal(raw, &m); err != nil {
Expand Down
2 changes: 2 additions & 0 deletions revive/config.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,3 +27,5 @@ warningCode = 1
[rule.unused-parameter]
[rule.unreachable-code]
[rule.redefines-builtin-id]
[rule.exported]
arguments=["disableStutteringCheck"]
5 changes: 4 additions & 1 deletion version/version.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,11 @@ package version
import "fmt"

var (
// GitCommit is set externally to the git commit this was built on.
GitCommit string
GitTag string

// GitTag is set externally to the git tag this was built on.
GitTag string
)

// GetVersion returns the version of the stanza library
Expand Down

0 comments on commit cd2849f

Please sign in to comment.