Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix most staticcheck warnings #989

Closed
wants to merge 14 commits into from
2 changes: 1 addition & 1 deletion active/active.go
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ func NewGCSSource(ctx context.Context, label string, fl FileLister, toRunnable f
fileLister: fl,
toRunnable: toRunnable,

pendingChan: make(chan Runnable, 0),
pendingChan: make(chan Runnable),
label: label,
}

Expand Down
2 changes: 1 addition & 1 deletion active/poller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ func (g *fakeGardener) ServeHTTP(w http.ResponseWriter, r *http.Request) {
log.Fatal("Should be POST") // Not t.Fatal because this is asynchronous.
}
g.lock.Lock()
g.lock.Unlock()
defer g.lock.Unlock()
switch r.URL.Path {
case "/job":
if len(g.jobs) < 1 {
Expand Down
7 changes: 7 additions & 0 deletions bq/bq_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,21 @@ func init() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
}

//lint:ignore U1000 compile time assertions
func assertInserter(in etl.Inserter) {
	// Compile-time check that *bq.BQInserter satisfies etl.Inserter.
	var _ etl.Inserter = &bq.BQInserter{}
}

//lint:ignore U1000 compile time assertions
func assertSinkFactory(f factory.SinkFactory) {
	// Compile-time check that bq.NewSinkFactory returns a factory.SinkFactory.
	var _ factory.SinkFactory = bq.NewSinkFactory()
}

//lint:ignore U1000 compile time assertions
func assertSaver(ms bq.MapSaver) {
	// Compile-time check that bq.MapSaver implements bigquery.ValueSaver.
	var _ bigquery.ValueSaver = ms
}

func TestSinkFactory(t *testing.T) {
f := bq.NewSinkFactory()
s, err := f.Get(context.Background(), etl.DataPath{})
Expand Down
4 changes: 0 additions & 4 deletions bq/insert.go
Original file line number Diff line number Diff line change
Expand Up @@ -164,10 +164,6 @@ func (s MapSaver) Save() (row map[string]bigquery.Value, insertID string, err er
return s, "", nil
}

func assertSaver(ms MapSaver) {
func(bigquery.ValueSaver) {}(ms)
}

//----------------------------------------------------------------------------

// BQInserter provides an API for inserting rows into a specific BQ Table.
Expand Down
40 changes: 20 additions & 20 deletions cmd/etl_worker/etl_worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ func handleRequest(rwr http.ResponseWriter, rq *http.Request) {
rawFileName := rq.FormValue("filename")
status, msg := subworker(rawFileName, executionCount, retryCount, age)
rwr.WriteHeader(status)
fmt.Fprintf(rwr, msg)
fmt.Fprint(rwr, msg)
}

func subworker(rawFileName string, executionCount, retryCount int, age time.Duration) (status int, msg string) {
Expand Down Expand Up @@ -334,25 +334,25 @@ func startServers(ctx context.Context, mux http.Handler) *errgroup.Group {
// This publishes the service port for use in unit tests.
mainServerAddr <- server.Addr

select {
case <-ctx.Done():
// This currently only executes when the context is cancelled
// by unit tests. It does not yet execute in production.
log.Println("Shutting down servers")
ctx, cancel := context.WithTimeout(context.Background(), *shutdownTimeout)
defer cancel()
start := time.Now()
eg := errgroup.Group{}
eg.Go(func() error {
return server.Shutdown(ctx)
})
eg.Go(func() error {
return promServer.Shutdown(ctx)
})
eg.Wait()
log.Println("Shutdown took", time.Since(start))
return &eg
}
// Wait for shutdown
<-ctx.Done()

// This currently only executes when the context is cancelled
// by unit tests. It does not yet execute in production.
log.Println("Shutting down servers")
ctx, cancel := context.WithTimeout(context.Background(), *shutdownTimeout)
defer cancel()
start := time.Now()
eg := errgroup.Group{}
eg.Go(func() error {
return server.Shutdown(ctx)
})
eg.Go(func() error {
return promServer.Shutdown(ctx)
})
eg.Wait()
log.Println("Shutdown took", time.Since(start))
return &eg
}

func main() {
Expand Down
20 changes: 1 addition & 19 deletions cmd/generate_schema_docs/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ func init() {

flag.Usage = func() {
fmt.Fprintf(os.Stderr, "%s\n", os.Args[0])
fmt.Fprintf(os.Stderr, usage)
fmt.Fprint(os.Stderr, usage)
fmt.Fprintln(os.Stderr, "Flags:")
flag.PrintDefaults()
}
Expand Down Expand Up @@ -114,24 +114,6 @@ func generateRichMarkdown(s bigquery.Schema, t schemaGenerator) []byte {
return buf.Bytes()
}

// TODO: remove this function if it turns out to be replaced by generateRichMarkdown.
func generateMarkdown(schema bigquery.Schema) []byte {
buf := &bytes.Buffer{}
fmt.Fprintln(buf, "| Field name | Type | Description |")
fmt.Fprintln(buf, "| :----------------|:----------:|:---------------|")
bqx.WalkSchema(schema, func(prefix []string, field *bigquery.FieldSchema) error {
var path string
if len(prefix) == 1 {
path = ""
} else {
path = strings.Join(prefix[:len(prefix)-1], ".") + "."
}
fmt.Fprintf(buf, "| %s**%s** | %s | %s |\n", path, prefix[len(prefix)-1], field.Type, field.Description)
return nil
})
return buf.Bytes()
}

// All record structs define a Schema method. This interface allows us to
// process each of them easily.
type schemaGenerator interface {
Expand Down
6 changes: 0 additions & 6 deletions cmd/update-schema/update.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ import (
"time"

"cloud.google.com/go/bigquery"
"google.golang.org/api/googleapi"

"github.com/m-lab/go/cloud/bqx"
"github.com/m-lab/go/flagx"
Expand Down Expand Up @@ -109,11 +108,6 @@ func CreateOrUpdate(schema bigquery.Schema, project, dataset, table, partField s
log.Println("UpdateTable failed:", err)
// TODO add specific error handling for incompatible schema change

apiErr, ok := err.(*googleapi.Error)
if !ok || apiErr.Code != 404 {
// TODO - different behavior on specific error types?
}

partitioning := &bigquery.TimePartitioning{
Field: partField,
}
Expand Down
9 changes: 3 additions & 6 deletions etl/globals.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,6 @@ var (
expNNNNE + // 4,5,6
suffix + `$`) // 7

dateTimePattern = regexp.MustCompile(dateTime)
sitePattern = regexp.MustCompile(type2 + mlabNSiteNN)

justSitePattern = regexp.MustCompile(`.*` + mlabNSiteNN + `.*`)
)

Expand Down Expand Up @@ -114,16 +111,16 @@ type DataPath struct {
func ValidateTestPath(path string) (DataPath, error) {
basic := basicTaskPattern.FindStringSubmatch(path)
if basic == nil {
return DataPath{}, errors.New("Path missing date-time string")
return DataPath{}, errors.New("path missing date-time string")
}
preamble := startPattern.FindStringSubmatch(basic[1])
if preamble == nil {
return DataPath{}, errors.New("Invalid preamble: " + fmt.Sprint(basic))
return DataPath{}, errors.New("invalid preamble: " + fmt.Sprint(basic))
}

post := endPattern.FindStringSubmatch(basic[5])
if post == nil {
return DataPath{}, errors.New("Invalid postamble: " + basic[5])
return DataPath{}, errors.New("invalid postamble: " + basic[5])
}
dp := DataPath{
URI: path,
Expand Down
5 changes: 1 addition & 4 deletions fake/fold.go
Original file line number Diff line number Diff line change
Expand Up @@ -100,10 +100,7 @@ func equalFoldRight(s, t []byte) bool {
t = t[size:]

}
if len(t) > 0 {
return false
}
return true
return len(t) >= 0
}

// asciiEqualFold is a specialization of bytes.EqualFold for use when
Expand Down
19 changes: 0 additions & 19 deletions fake/uploader.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,6 @@ import (
// Stuff from params.go
//---------------------------------------------------------------------------------------
var (
// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
timestampFormat = "2006-01-02 15:04:05.999999-07:00"

// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)
Expand Down Expand Up @@ -66,21 +63,6 @@ var (

var typeOfByteSlice = reflect.TypeOf([]byte{})

var schemaCache Cache

type cacheVal struct {
schema bigquery.Schema
err error
}

func inferSchemaReflectCached(t reflect.Type) (bigquery.Schema, error) {
cv := schemaCache.Get(t, func() interface{} {
s, err := inferSchemaReflect(t)
return cacheVal{s, err}
}).(cacheVal)
return cv.schema, cv.err
}

func inferSchemaReflect(t reflect.Type) (bigquery.Schema, error) {
rec, err := hasRecursiveType(t, nil)
if err != nil {
Expand Down Expand Up @@ -241,7 +223,6 @@ func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
// FakeUploader is a fake for Uploader, for use in debugging, and tests.
// See bigquery.Uploader for field info.
type FakeUploader struct {
t *bigquery.Table
SkipInvalidRows bool
IgnoreUnknownValues bool
TableTemplateSuffix string
Expand Down
2 changes: 0 additions & 2 deletions metrics/metrics_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,6 @@ func rePanic() {
}()
a := []int{1, 2, 3}
log.Println(a[4])
// This is never reached.
return
}

func TestCountPanics(t *testing.T) {
Expand Down
2 changes: 1 addition & 1 deletion parser/annotation.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ type AnnotationParser struct {
type nullAnnotator struct{}

func (ann *nullAnnotator) GetAnnotations(ctx context.Context, date time.Time, ips []string, info ...string) (*v2as.Response, error) {
return &v2as.Response{AnnotatorDate: time.Now(), Annotations: make(map[string]*api.Annotations, 0)}, nil
return &v2as.Response{AnnotatorDate: time.Now(), Annotations: make(map[string]*api.Annotations)}, nil
}

// NewAnnotationParser creates a new parser for annotation data.
Expand Down
4 changes: 2 additions & 2 deletions parser/base.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,9 +21,9 @@ import (

// Errors that may be returned by BaseRowBuffer functions.
var (
ErrAnnotationError = errors.New("Annotation error")
ErrAnnotationError = errors.New("annotation error")
ErrNotAnnotatable = errors.New("object does not implement Annotatable")
ErrRowNotPointer = errors.New("Row should be a pointer type")
ErrRowNotPointer = errors.New("row should be a pointer type")
)

// RowBuffer provides all basic functionality generally needed for buffering, annotating, and inserting
Expand Down
1 change: 1 addition & 0 deletions parser/base_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ func (row *Row) GetLogTime() time.Time {
return time.Now()
}

//lint:ignore U1000 compile time assertions
func assertTestRowAnnotatable(r *Row) {
	// Compile-time check that *Row implements row.Annotatable.
	func(row.Annotatable) {}(r)
}
Expand Down
3 changes: 2 additions & 1 deletion parser/disco_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,8 @@ func TestJSONParsing(t *testing.T) {

// DISABLED
// This tests insertion into a test table in the cloud. Should not normally be executed.
func xTestRealBackend(t *testing.T) {
func TestRealBackend(t *testing.T) {
t.Skip("Disabled")
ins, err := bq.NewInserter(etl.SW, time.Now())
var parser etl.Parser = parser.NewDiscoParser(ins)

Expand Down
2 changes: 1 addition & 1 deletion parser/ndt_meta_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ func TestMetaParser(t *testing.T) {
meta := parser.ProcessMetaFile("ndt", "suffix", metaName, metaData)

if meta == nil {
t.Error("metaFile has not been populated.")
t.Fatal("metaFile has not been populated.")
}
timestamp, _ := time.Parse("20060102T15:04:05.999999999Z", "20170509T13:45:13.59021Z")
if meta.DateTime != timestamp {
Expand Down
17 changes: 8 additions & 9 deletions parser/ndt_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,17 @@ import (
"github.com/m-lab/etl/schema"
)

//lint:ignore U1000 compile time assertions
func assertSaver(ms schema.Web100ValueMap) {
	// Compile-time check that schema.Web100ValueMap implements bigquery.ValueSaver.
	func(bigquery.ValueSaver) {}(ms)
}

//lint:ignore U1000 compile time assertions
func assertNDTTestIsAnnotatable(r parser.NDTTest) {
	// Compile-time check that parser.NDTTest implements row.Annotatable.
	func(row.Annotatable) {}(r)
}

//lint:ignore U1000 compile time assertions
func assertNDTTestIsValueSaver(r parser.NDTTest) {
	// Compile-time check that parser.NDTTest implements bigquery.ValueSaver.
	func(bigquery.ValueSaver) {}(r)
}
Expand Down Expand Up @@ -289,6 +296,7 @@ func compare(t *testing.T, actual schema.Web100ValueMap, expected schema.Web100V
return match
}

//lint:ignore U1000 compile time assertions
func assertInserter(in etl.Inserter) {
	// Compile-time check that *inMemoryInserter satisfies etl.Inserter.
	func(in etl.Inserter) {}(&inMemoryInserter{})
}
Expand All @@ -307,15 +315,6 @@ func newInMemoryInserter() *inMemoryInserter {
return &inMemoryInserter{data, 0, 0, token}
}

// acquire and release handle the single token that protects the FlushSlice and
// access to the metrics.
func (in *inMemoryInserter) acquire() {
<-in.token
}
func (in *inMemoryInserter) release() {
in.token <- struct{}{} // return the token.
}

// Commit inserts the given rows by delegating to Put.
// The label argument is accepted for interface compatibility but ignored here.
func (in *inMemoryInserter) Commit(data []interface{}, label string) error {
	return in.Put(data)
}
Expand Down
Loading