Bumping k8s dependencies to 1.13
343
vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go
generated
vendored
Normal file
@@ -0,0 +1,343 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package analysisflags defines helpers for processing flags of
|
||||
// analysis driver tools.
|
||||
package analysisflags
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
)
|
||||
|
||||
// flags common to all {single,multi,unit}checkers.
|
||||
var (
|
||||
JSON = false // -json
|
||||
Context = -1 // -c=N: if N>0, display offending line plus N lines of context
|
||||
)
|
||||
|
||||
// Parse creates a flag for each of the analyzer's flags,
|
||||
// including (in multi mode) a flag named after the analyzer,
|
||||
// parses the flags, then filters and returns the list of
|
||||
// analyzers enabled by flags.
|
||||
func Parse(analyzers []*analysis.Analyzer, multi bool) []*analysis.Analyzer {
|
||||
// Connect each analysis flag to the command line as -analysis.flag.
|
||||
enabled := make(map[*analysis.Analyzer]*triState)
|
||||
for _, a := range analyzers {
|
||||
var prefix string
|
||||
|
||||
// Add -NAME flag to enable it.
|
||||
if multi {
|
||||
prefix = a.Name + "."
|
||||
|
||||
enable := new(triState)
|
||||
enableUsage := "enable " + a.Name + " analysis"
|
||||
flag.Var(enable, a.Name, enableUsage)
|
||||
enabled[a] = enable
|
||||
}
|
||||
|
||||
a.Flags.VisitAll(func(f *flag.Flag) {
|
||||
if !multi && flag.Lookup(f.Name) != nil {
|
||||
log.Printf("%s flag -%s would conflict with driver; skipping", a.Name, f.Name)
|
||||
return
|
||||
}
|
||||
|
||||
name := prefix + f.Name
|
||||
flag.Var(f.Value, name, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
// standard flags: -flags, -V.
|
||||
printflags := flag.Bool("flags", false, "print analyzer flags in JSON")
|
||||
addVersionFlag()
|
||||
|
||||
// flags common to all checkers
|
||||
flag.BoolVar(&JSON, "json", JSON, "emit JSON output")
|
||||
flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`)
|
||||
|
||||
// Add shims for legacy vet flags to enable existing
|
||||
// scripts that run vet to continue to work.
|
||||
_ = flag.Bool("source", false, "no effect (deprecated)")
|
||||
_ = flag.Bool("v", false, "no effect (deprecated)")
|
||||
_ = flag.Bool("all", false, "no effect (deprecated)")
|
||||
_ = flag.String("tags", "", "no effect (deprecated)")
|
||||
for old, new := range vetLegacyFlags {
|
||||
newFlag := flag.Lookup(new)
|
||||
if newFlag != nil && flag.Lookup(old) == nil {
|
||||
flag.Var(newFlag.Value, old, "deprecated alias for -"+new)
|
||||
}
|
||||
}
|
||||
|
||||
flag.Parse() // (ExitOnError)
|
||||
|
||||
// -flags: print flags so that go vet knows which ones are legitimate.
|
||||
if *printflags {
|
||||
printFlags()
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// If any -NAME flag is true, run only those analyzers. Otherwise,
|
||||
// if any -NAME flag is false, run all but those analyzers.
|
||||
if multi {
|
||||
var hasTrue, hasFalse bool
|
||||
for _, ts := range enabled {
|
||||
switch *ts {
|
||||
case setTrue:
|
||||
hasTrue = true
|
||||
case setFalse:
|
||||
hasFalse = true
|
||||
}
|
||||
}
|
||||
|
||||
var keep []*analysis.Analyzer
|
||||
if hasTrue {
|
||||
for _, a := range analyzers {
|
||||
if *enabled[a] == setTrue {
|
||||
keep = append(keep, a)
|
||||
}
|
||||
}
|
||||
analyzers = keep
|
||||
} else if hasFalse {
|
||||
for _, a := range analyzers {
|
||||
if *enabled[a] != setFalse {
|
||||
keep = append(keep, a)
|
||||
}
|
||||
}
|
||||
analyzers = keep
|
||||
}
|
||||
}
|
||||
|
||||
return analyzers
|
||||
}
|
||||
|
||||
func printFlags() {
|
||||
type jsonFlag struct {
|
||||
Name string
|
||||
Bool bool
|
||||
Usage string
|
||||
}
|
||||
var flags []jsonFlag = nil
|
||||
flag.VisitAll(func(f *flag.Flag) {
|
||||
// Don't report {single,multi}checker debugging
|
||||
// flags as these have no effect on unitchecker
|
||||
// (as invoked by 'go vet').
|
||||
switch f.Name {
|
||||
case "debug", "cpuprofile", "memprofile", "trace":
|
||||
return
|
||||
}
|
||||
|
||||
b, ok := f.Value.(interface{ IsBoolFlag() bool })
|
||||
isBool := ok && b.IsBoolFlag()
|
||||
flags = append(flags, jsonFlag{f.Name, isBool, f.Usage})
|
||||
})
|
||||
data, err := json.MarshalIndent(flags, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
os.Stdout.Write(data)
|
||||
}
|
||||
|
||||
// addVersionFlag registers a -V flag that, if set,
|
||||
// prints the executable version and exits 0.
|
||||
//
|
||||
// It is a variable not a function to permit easy
|
||||
// overriding in the copy vendored in $GOROOT/src/cmd/vet:
|
||||
//
|
||||
// func init() { addVersionFlag = objabi.AddVersionFlag }
|
||||
var addVersionFlag = func() {
|
||||
flag.Var(versionFlag{}, "V", "print version and exit")
|
||||
}
|
||||
|
||||
// versionFlag minimally complies with the -V protocol required by "go vet".
|
||||
type versionFlag struct{}
|
||||
|
||||
func (versionFlag) IsBoolFlag() bool { return true }
|
||||
func (versionFlag) Get() interface{} { return nil }
|
||||
func (versionFlag) String() string { return "" }
|
||||
func (versionFlag) Set(s string) error {
|
||||
if s != "full" {
|
||||
log.Fatalf("unsupported flag value: -V=%s", s)
|
||||
}
|
||||
|
||||
// This replicates the minimal subset of
|
||||
// cmd/internal/objabi.AddVersionFlag, which is private to the
|
||||
// go tool yet forms part of our command-line interface.
|
||||
// TODO(adonovan): clarify the contract.
|
||||
|
||||
// Print the tool version so the build system can track changes.
|
||||
// Formats:
|
||||
// $progname version devel ... buildID=...
|
||||
// $progname version go1.9.1
|
||||
progname := os.Args[0]
|
||||
f, err := os.Open(progname)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
h := sha256.New()
|
||||
if _, err := io.Copy(h, f); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
f.Close()
|
||||
fmt.Printf("%s version devel comments-go-here buildID=%02x\n",
|
||||
progname, string(h.Sum(nil)))
|
||||
os.Exit(0)
|
||||
return nil
|
||||
}
|
||||
|
||||
// A triState is a boolean that knows whether
|
||||
// it has been set to either true or false.
|
||||
// It is used to identify whether a flag appears;
|
||||
// the standard boolean flag cannot
|
||||
// distinguish missing from unset.
|
||||
// It also satisfies flag.Value.
|
||||
type triState int
|
||||
|
||||
const (
|
||||
unset triState = iota
|
||||
setTrue
|
||||
setFalse
|
||||
)
|
||||
|
||||
func triStateFlag(name string, value triState, usage string) *triState {
|
||||
flag.Var(&value, name, usage)
|
||||
return &value
|
||||
}
|
||||
|
||||
// triState implements flag.Value, flag.Getter, and flag.boolFlag.
|
||||
// They work like boolean flags: we can say vet -printf as well as vet -printf=true
|
||||
func (ts *triState) Get() interface{} {
|
||||
return *ts == setTrue
|
||||
}
|
||||
|
||||
func (ts triState) isTrue() bool {
|
||||
return ts == setTrue
|
||||
}
|
||||
|
||||
func (ts *triState) Set(value string) error {
|
||||
b, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
// This error message looks poor but package "flag" adds
|
||||
// "invalid boolean value %q for -NAME: %s"
|
||||
return fmt.Errorf("want true or false")
|
||||
}
|
||||
if b {
|
||||
*ts = setTrue
|
||||
} else {
|
||||
*ts = setFalse
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ts *triState) String() string {
|
||||
switch *ts {
|
||||
case unset:
|
||||
return "true"
|
||||
case setTrue:
|
||||
return "true"
|
||||
case setFalse:
|
||||
return "false"
|
||||
}
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (ts triState) IsBoolFlag() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Legacy flag support
|
||||
|
||||
// vetLegacyFlags maps flags used by legacy vet to their corresponding
|
||||
// new names. The old names will continue to work.
|
||||
var vetLegacyFlags = map[string]string{
|
||||
// Analyzer name changes
|
||||
"bool": "bools",
|
||||
"buildtags": "buildtag",
|
||||
"methods": "stdmethods",
|
||||
"rangeloops": "loopclosure",
|
||||
|
||||
// Analyzer flags
|
||||
"compositewhitelist": "composites.whitelist",
|
||||
"printfuncs": "printf.funcs",
|
||||
"shadowstrict": "shadow.strict",
|
||||
"unusedfuncs": "unusedresult.funcs",
|
||||
"unusedstringmethods": "unusedresult.stringmethods",
|
||||
}
|
||||
|
||||
// ---- output helpers common to all drivers ----
|
||||
|
||||
// PrintPlain prints a diagnostic in plain text form,
|
||||
// with context specified by the -c flag.
|
||||
func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) {
|
||||
posn := fset.Position(diag.Pos)
|
||||
fmt.Fprintf(os.Stderr, "%s: %s\n", posn, diag.Message)
|
||||
|
||||
// -c=N: show offending line plus N lines of context.
|
||||
if Context >= 0 {
|
||||
data, _ := ioutil.ReadFile(posn.Filename)
|
||||
lines := strings.Split(string(data), "\n")
|
||||
for i := posn.Line - Context; i <= posn.Line+Context; i++ {
|
||||
if 1 <= i && i <= len(lines) {
|
||||
fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A JSONTree is a mapping from package ID to analysis name to result.
|
||||
// Each result is either a jsonError or a list of jsonDiagnostic.
|
||||
type JSONTree map[string]map[string]interface{}
|
||||
|
||||
// Add adds the result of analysis 'name' on package 'id'.
|
||||
// The result is either a list of diagnostics or an error.
|
||||
func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) {
|
||||
var v interface{}
|
||||
if err != nil {
|
||||
type jsonError struct {
|
||||
Err string `json:"error"`
|
||||
}
|
||||
v = jsonError{err.Error()}
|
||||
} else if len(diags) > 0 {
|
||||
type jsonDiagnostic struct {
|
||||
Category string `json:"category,omitempty"`
|
||||
Posn string `json:"posn"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
var diagnostics []jsonDiagnostic
|
||||
for _, f := range diags {
|
||||
diagnostics = append(diagnostics, jsonDiagnostic{
|
||||
Category: f.Category,
|
||||
Posn: fset.Position(f.Pos).String(),
|
||||
Message: f.Message,
|
||||
})
|
||||
}
|
||||
v = diagnostics
|
||||
}
|
||||
if v != nil {
|
||||
m, ok := tree[id]
|
||||
if !ok {
|
||||
m = make(map[string]interface{})
|
||||
tree[id] = m
|
||||
}
|
||||
m[name] = v
|
||||
}
|
||||
}
|
||||
|
||||
func (tree JSONTree) Print() {
|
||||
data, err := json.MarshalIndent(tree, "", "\t")
|
||||
if err != nil {
|
||||
log.Panicf("internal error: JSON marshalling failed: %v", err)
|
||||
}
|
||||
fmt.Printf("%s\n", data)
|
||||
}
|
67
vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags_test.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package analysisflags_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/analysis/internal/analysisflags"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Println(analysisflags.Parse([]*analysis.Analyzer{
|
||||
{Name: "a1", Doc: "a1"},
|
||||
{Name: "a2", Doc: "a2"},
|
||||
{Name: "a3", Doc: "a3"},
|
||||
}, true))
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// This test fork/execs the main function above.
|
||||
func TestExec(t *testing.T) {
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skipf("skipping fork/exec test on this platform")
|
||||
}
|
||||
|
||||
progname := os.Args[0]
|
||||
|
||||
if os.Getenv("ANALYSISFLAGS_CHILD") == "1" {
|
||||
// child process
|
||||
os.Args = strings.Fields(progname + " " + os.Getenv("FLAGS"))
|
||||
main()
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
for _, test := range []struct {
|
||||
flags string
|
||||
want string
|
||||
}{
|
||||
{"", "[a1 a2 a3]"},
|
||||
{"-a1=0", "[a2 a3]"},
|
||||
{"-a1=1", "[a1]"},
|
||||
{"-a1", "[a1]"},
|
||||
{"-a1=1 -a3=1", "[a1 a3]"},
|
||||
{"-a1=1 -a3=0", "[a1]"},
|
||||
} {
|
||||
cmd := exec.Command(progname, "-test.run=TestExec")
|
||||
cmd.Env = append(os.Environ(), "ANALYSISFLAGS_CHILD=1", "FLAGS="+test.flags)
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("exec failed: %v; output=<<%s>>", err, output)
|
||||
}
|
||||
|
||||
got := strings.TrimSpace(string(output))
|
||||
if got != test.want {
|
||||
t.Errorf("got %s, want %s", got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
89
vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
package analysisflags
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
)
|
||||
|
||||
const help = `PROGNAME is a tool for static analysis of Go programs.
|
||||
|
||||
PROGNAME examines Go source code and reports suspicious constructs,
|
||||
such as Printf calls whose arguments do not align with the format
|
||||
string. It uses heuristics that do not guarantee all reports are
|
||||
genuine problems, but it can find errors not caught by the compilers.
|
||||
`
|
||||
|
||||
// Help implements the help subcommand for a multichecker or vet-lite
|
||||
// style command. The optional args specify the analyzers to describe.
|
||||
// Help calls log.Fatal if no such analyzer exists.
|
||||
func Help(progname string, analyzers []*analysis.Analyzer, args []string) {
|
||||
// No args: show summary of all analyzers.
|
||||
if len(args) == 0 {
|
||||
fmt.Println(strings.Replace(help, "PROGNAME", progname, -1))
|
||||
fmt.Println("Registered analyzers:")
|
||||
fmt.Println()
|
||||
sort.Slice(analyzers, func(i, j int) bool {
|
||||
return analyzers[i].Name < analyzers[j].Name
|
||||
})
|
||||
for _, a := range analyzers {
|
||||
title := strings.Split(a.Doc, "\n\n")[0]
|
||||
fmt.Printf(" %-12s %s\n", a.Name, title)
|
||||
}
|
||||
fmt.Println("\nBy default all analyzers are run.")
|
||||
fmt.Println("To select specific analyzers, use the -NAME flag for each one,")
|
||||
fmt.Println(" or -NAME=false to run all analyzers not explicitly disabled.")
|
||||
|
||||
// Show only the core command-line flags.
|
||||
fmt.Println("\nCore flags:")
|
||||
fmt.Println()
|
||||
fs := flag.NewFlagSet("", flag.ExitOnError)
|
||||
flag.VisitAll(func(f *flag.Flag) {
|
||||
if !strings.Contains(f.Name, ".") {
|
||||
fs.Var(f.Value, f.Name, f.Usage)
|
||||
}
|
||||
})
|
||||
fs.PrintDefaults()
|
||||
|
||||
fmt.Printf("\nTo see details and flags of a specific analyzer, run '%s help name'.\n", progname)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Show help on specific analyzer(s).
|
||||
outer:
|
||||
for _, arg := range args {
|
||||
for _, a := range analyzers {
|
||||
if a.Name == arg {
|
||||
paras := strings.Split(a.Doc, "\n\n")
|
||||
title := paras[0]
|
||||
fmt.Printf("%s: %s\n", a.Name, title)
|
||||
|
||||
// Show only the flags relating to this analysis,
|
||||
// properly prefixed.
|
||||
first := true
|
||||
fs := flag.NewFlagSet(a.Name, flag.ExitOnError)
|
||||
a.Flags.VisitAll(func(f *flag.Flag) {
|
||||
if first {
|
||||
first = false
|
||||
fmt.Println("\nAnalyzer flags:")
|
||||
fmt.Println()
|
||||
}
|
||||
fs.Var(f.Value, a.Name+"."+f.Name, f.Usage)
|
||||
})
|
||||
fs.PrintDefaults()
|
||||
|
||||
if len(paras) > 1 {
|
||||
fmt.Printf("\n%s\n", strings.Join(paras[1:], "\n\n"))
|
||||
}
|
||||
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
log.Fatalf("Analyzer %q not registered", arg)
|
||||
}
|
||||
}
|
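A minimal sketch (not part of this diff) of how a vet-lite style driver might dispatch a "help" subcommand to the Help function above. The helper name maybeHelp and the progname value are illustrative assumptions, and these internal packages are importable only from within golang.org/x/tools itself.

package main

import (
	"os"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/internal/analysisflags"
)

// maybeHelp is a hypothetical helper: if the first argument is "help",
// it prints the registered analyzers (or details of the named ones) and exits.
func maybeHelp(progname string, analyzers []*analysis.Analyzer) {
	if len(os.Args) > 1 && os.Args[1] == "help" {
		analysisflags.Help(progname, analyzers, os.Args[2:])
		os.Exit(0)
	}
}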
701
vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go
generated
vendored
Normal file
@@ -0,0 +1,701 @@
|
||||
// Package checker defines the implementation of the checker commands.
|
||||
// The same code drives the multi-analysis driver, the single-analysis
|
||||
// driver that is conventionally provided for convenience along with
|
||||
// each analysis package, and the test driver.
|
||||
package checker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"runtime/trace"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/analysis/internal/analysisflags"
|
||||
"golang.org/x/tools/go/packages"
|
||||
)
|
||||
|
||||
var (
|
||||
// Debug is a set of single-letter flags:
|
||||
//
|
||||
// f show [f]acts as they are created
|
||||
// p disable [p]arallel execution of analyzers
|
||||
// s do additional [s]anity checks on fact types and serialization
|
||||
// t show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise)
|
||||
// v show [v]erbose logging
|
||||
//
|
||||
Debug = ""
|
||||
|
||||
// Log files for optional performance tracing.
|
||||
CPUProfile, MemProfile, Trace string
|
||||
)
|
||||
|
||||
// RegisterFlags registers command-line flags used by the analysis driver.
|
||||
func RegisterFlags() {
|
||||
// When adding flags here, remember to update
|
||||
// the list of suppressed flags in analysisflags.
|
||||
|
||||
flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`)
|
||||
|
||||
flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file")
|
||||
flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file")
|
||||
flag.StringVar(&Trace, "trace", "", "write trace log to this file")
|
||||
}
|
||||
|
||||
// Run loads the packages specified by args using go/packages,
|
||||
// then applies the specified analyzers to them.
|
||||
// Analysis flags must already have been set.
|
||||
// It provides most of the logic for the main functions of both the
|
||||
// singlechecker and the multi-analysis commands.
|
||||
// It returns the appropriate exit code.
|
||||
func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) {
|
||||
if CPUProfile != "" {
|
||||
f, err := os.Create(CPUProfile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := pprof.StartCPUProfile(f); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// NB: profile won't be written in case of error.
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
if Trace != "" {
|
||||
f, err := os.Create(Trace)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if err := trace.Start(f); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// NB: trace log won't be written in case of error.
|
||||
defer func() {
|
||||
trace.Stop()
|
||||
log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace)
|
||||
}()
|
||||
}
|
||||
|
||||
if MemProfile != "" {
|
||||
f, err := os.Create(MemProfile)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// NB: memprofile won't be written in case of error.
|
||||
defer func() {
|
||||
runtime.GC() // get up-to-date statistics
|
||||
if err := pprof.WriteHeapProfile(f); err != nil {
|
||||
log.Fatalf("Writing memory profile: %v", err)
|
||||
}
|
||||
f.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
// Load the packages.
|
||||
if dbg('v') {
|
||||
log.SetPrefix("")
|
||||
log.SetFlags(log.Lmicroseconds) // display timing
|
||||
log.Printf("load %s", args)
|
||||
}
|
||||
|
||||
// Optimization: if the selected analyzers don't produce/consume
|
||||
// facts, we need source only for the initial packages.
|
||||
allSyntax := needFacts(analyzers)
|
||||
initial, err := load(args, allSyntax)
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
return 1 // load errors
|
||||
}
|
||||
|
||||
// Print the results.
|
||||
roots := analyze(initial, analyzers)
|
||||
|
||||
return printDiagnostics(roots)
|
||||
}
|
||||
|
||||
// load loads the initial packages.
|
||||
func load(patterns []string, allSyntax bool) ([]*packages.Package, error) {
|
||||
mode := packages.LoadSyntax
|
||||
if allSyntax {
|
||||
mode = packages.LoadAllSyntax
|
||||
}
|
||||
conf := packages.Config{
|
||||
Mode: mode,
|
||||
Tests: true,
|
||||
}
|
||||
initial, err := packages.Load(&conf, patterns...)
|
||||
if err == nil {
|
||||
if n := packages.PrintErrors(initial); n > 1 {
|
||||
err = fmt.Errorf("%d errors during loading", n)
|
||||
} else if n == 1 {
|
||||
err = fmt.Errorf("error during loading")
|
||||
}
|
||||
}
|
||||
if len(initial) == 0 {
|
||||
err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " "))
|
||||
}
|
||||
return initial, err
|
||||
}
|
||||
|
||||
// TestAnalyzer applies an analysis to a set of packages (and their
|
||||
// dependencies if necessary) and returns the results.
|
||||
//
|
||||
// Facts about pkg are returned in a map keyed by object; package facts
|
||||
// have a nil key.
|
||||
//
|
||||
// This entry point is used only by analysistest.
|
||||
func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult {
|
||||
var results []*TestAnalyzerResult
|
||||
for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) {
|
||||
facts := make(map[types.Object][]analysis.Fact)
|
||||
for key, fact := range act.objectFacts {
|
||||
if key.obj.Pkg() == act.pass.Pkg {
|
||||
facts[key.obj] = append(facts[key.obj], fact)
|
||||
}
|
||||
}
|
||||
for key, fact := range act.packageFacts {
|
||||
if key.pkg == act.pass.Pkg {
|
||||
facts[nil] = append(facts[nil], fact)
|
||||
}
|
||||
}
|
||||
|
||||
results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err})
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
type TestAnalyzerResult struct {
|
||||
Pass *analysis.Pass
|
||||
Diagnostics []analysis.Diagnostic
|
||||
Facts map[types.Object][]analysis.Fact
|
||||
Result interface{}
|
||||
Err error
|
||||
}
|
||||
|
||||
func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action {
|
||||
// Construct the action graph.
|
||||
if dbg('v') {
|
||||
log.Printf("building graph of analysis passes")
|
||||
}
|
||||
|
||||
// Each graph node (action) is one unit of analysis.
|
||||
// Edges express package-to-package (vertical) dependencies,
|
||||
// and analysis-to-analysis (horizontal) dependencies.
|
||||
type key struct {
|
||||
*analysis.Analyzer
|
||||
*packages.Package
|
||||
}
|
||||
actions := make(map[key]*action)
|
||||
|
||||
var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
|
||||
mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
|
||||
k := key{a, pkg}
|
||||
act, ok := actions[k]
|
||||
if !ok {
|
||||
act = &action{a: a, pkg: pkg}
|
||||
|
||||
// Add a dependency on each required analyzer.
|
||||
for _, req := range a.Requires {
|
||||
act.deps = append(act.deps, mkAction(req, pkg))
|
||||
}
|
||||
|
||||
// An analysis that consumes/produces facts
|
||||
// must run on the package's dependencies too.
|
||||
if len(a.FactTypes) > 0 {
|
||||
paths := make([]string, 0, len(pkg.Imports))
|
||||
for path := range pkg.Imports {
|
||||
paths = append(paths, path)
|
||||
}
|
||||
sort.Strings(paths) // for determinism
|
||||
for _, path := range paths {
|
||||
dep := mkAction(a, pkg.Imports[path])
|
||||
act.deps = append(act.deps, dep)
|
||||
}
|
||||
}
|
||||
|
||||
actions[k] = act
|
||||
}
|
||||
return act
|
||||
}
|
||||
|
||||
// Build nodes for initial packages.
|
||||
var roots []*action
|
||||
for _, a := range analyzers {
|
||||
for _, pkg := range pkgs {
|
||||
root := mkAction(a, pkg)
|
||||
root.isroot = true
|
||||
roots = append(roots, root)
|
||||
}
|
||||
}
|
||||
|
||||
// Execute the graph in parallel.
|
||||
execAll(roots)
|
||||
|
||||
return roots
|
||||
}
|
||||
|
||||
// printDiagnostics prints the diagnostics for the root packages in either
|
||||
// plain text or JSON format. JSON format also includes errors for any
|
||||
// dependencies.
|
||||
//
|
||||
// It returns the exitcode: in plain mode, 0 for success, 1 for analysis
|
||||
// errors, and 3 for diagnostics. We avoid 2 since the flag package uses
|
||||
// it. JSON mode always succeeds at printing errors and diagnostics in a
|
||||
// structured form to stdout.
|
||||
func printDiagnostics(roots []*action) (exitcode int) {
|
||||
// Print the output.
|
||||
//
|
||||
// Print diagnostics only for root packages,
|
||||
// but errors for all packages.
|
||||
printed := make(map[*action]bool)
|
||||
var print func(*action)
|
||||
var visitAll func(actions []*action)
|
||||
visitAll = func(actions []*action) {
|
||||
for _, act := range actions {
|
||||
if !printed[act] {
|
||||
printed[act] = true
|
||||
visitAll(act.deps)
|
||||
print(act)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if analysisflags.JSON {
|
||||
// JSON output
|
||||
tree := make(analysisflags.JSONTree)
|
||||
print = func(act *action) {
|
||||
var diags []analysis.Diagnostic
|
||||
if act.isroot {
|
||||
diags = act.diagnostics
|
||||
}
|
||||
tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err)
|
||||
}
|
||||
visitAll(roots)
|
||||
tree.Print()
|
||||
} else {
|
||||
// plain text output
|
||||
|
||||
// De-duplicate diagnostics by position (not token.Pos) to
|
||||
// avoid double-reporting in source files that belong to
|
||||
// multiple packages, such as foo and foo.test.
|
||||
type key struct {
|
||||
token.Position
|
||||
*analysis.Analyzer
|
||||
message string
|
||||
}
|
||||
seen := make(map[key]bool)
|
||||
|
||||
print = func(act *action) {
|
||||
if act.err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err)
|
||||
exitcode = 1 // analysis failed, at least partially
|
||||
return
|
||||
}
|
||||
if act.isroot {
|
||||
for _, diag := range act.diagnostics {
|
||||
// We don't display a.Name/f.Category
|
||||
// as most users don't care.
|
||||
|
||||
posn := act.pkg.Fset.Position(diag.Pos)
|
||||
k := key{posn, act.a, diag.Message}
|
||||
if seen[k] {
|
||||
continue // duplicate
|
||||
}
|
||||
seen[k] = true
|
||||
|
||||
analysisflags.PrintPlain(act.pkg.Fset, diag)
|
||||
}
|
||||
}
|
||||
}
|
||||
visitAll(roots)
|
||||
|
||||
if exitcode == 0 && len(seen) > 0 {
|
||||
exitcode = 3 // successfully produced diagnostics
|
||||
}
|
||||
}
|
||||
|
||||
// Print timing info.
|
||||
if dbg('t') {
|
||||
if !dbg('p') {
|
||||
log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism")
|
||||
}
|
||||
var all []*action
|
||||
var total time.Duration
|
||||
for act := range printed {
|
||||
all = append(all, act)
|
||||
total += act.duration
|
||||
}
|
||||
sort.Slice(all, func(i, j int) bool {
|
||||
return all[i].duration > all[j].duration
|
||||
})
|
||||
|
||||
// Print actions accounting for 90% of the total.
|
||||
var sum time.Duration
|
||||
for _, act := range all {
|
||||
fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act)
|
||||
sum += act.duration
|
||||
if sum >= total*9/10 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return exitcode
|
||||
}
|
||||
|
||||
// needFacts reports whether any analysis required by the specified set
|
||||
// needs facts. If so, we must load the entire program from source.
|
||||
func needFacts(analyzers []*analysis.Analyzer) bool {
|
||||
seen := make(map[*analysis.Analyzer]bool)
|
||||
var q []*analysis.Analyzer // for BFS
|
||||
q = append(q, analyzers...)
|
||||
for len(q) > 0 {
|
||||
a := q[0]
|
||||
q = q[1:]
|
||||
if !seen[a] {
|
||||
seen[a] = true
|
||||
if len(a.FactTypes) > 0 {
|
||||
return true
|
||||
}
|
||||
q = append(q, a.Requires...)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// An action represents one unit of analysis work: the application of
|
||||
// one analysis to one package. Actions form a DAG, both within a
|
||||
// package (as different analyzers are applied, either in sequence or
|
||||
// parallel), and across packages (as dependencies are analyzed).
|
||||
type action struct {
|
||||
once sync.Once
|
||||
a *analysis.Analyzer
|
||||
pkg *packages.Package
|
||||
pass *analysis.Pass
|
||||
isroot bool
|
||||
deps []*action
|
||||
objectFacts map[objectFactKey]analysis.Fact
|
||||
packageFacts map[packageFactKey]analysis.Fact
|
||||
inputs map[*analysis.Analyzer]interface{}
|
||||
result interface{}
|
||||
diagnostics []analysis.Diagnostic
|
||||
err error
|
||||
duration time.Duration
|
||||
}
|
||||
|
||||
type objectFactKey struct {
|
||||
obj types.Object
|
||||
typ reflect.Type
|
||||
}
|
||||
|
||||
type packageFactKey struct {
|
||||
pkg *types.Package
|
||||
typ reflect.Type
|
||||
}
|
||||
|
||||
func (act *action) String() string {
|
||||
return fmt.Sprintf("%s@%s", act.a, act.pkg)
|
||||
}
|
||||
|
||||
func execAll(actions []*action) {
|
||||
sequential := dbg('p')
|
||||
var wg sync.WaitGroup
|
||||
for _, act := range actions {
|
||||
wg.Add(1)
|
||||
work := func(act *action) {
|
||||
act.exec()
|
||||
wg.Done()
|
||||
}
|
||||
if sequential {
|
||||
work(act)
|
||||
} else {
|
||||
go work(act)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (act *action) exec() { act.once.Do(act.execOnce) }
|
||||
|
||||
func (act *action) execOnce() {
|
||||
// Analyze dependencies.
|
||||
execAll(act.deps)
|
||||
|
||||
// TODO(adonovan): uncomment this during profiling.
|
||||
// It won't build pre-go1.11 but conditional compilation
|
||||
// using build tags isn't warranted.
|
||||
//
|
||||
// ctx, task := trace.NewTask(context.Background(), "exec")
|
||||
// trace.Log(ctx, "pass", act.String())
|
||||
// defer task.End()
|
||||
|
||||
// Record time spent in this node but not its dependencies.
|
||||
// In parallel mode, due to GC/scheduler contention, the
|
||||
// time is 5x higher than in sequential mode, even with a
|
||||
// semaphore limiting the number of threads here.
|
||||
// So use -debug=tp.
|
||||
if dbg('t') {
|
||||
t0 := time.Now()
|
||||
defer func() { act.duration = time.Since(t0) }()
|
||||
}
|
||||
|
||||
// Report an error if any dependency failed.
|
||||
var failed []string
|
||||
for _, dep := range act.deps {
|
||||
if dep.err != nil {
|
||||
failed = append(failed, dep.String())
|
||||
}
|
||||
}
|
||||
if failed != nil {
|
||||
sort.Strings(failed)
|
||||
act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
|
||||
return
|
||||
}
|
||||
|
||||
// Plumb the output values of the dependencies
|
||||
// into the inputs of this action. Also facts.
|
||||
inputs := make(map[*analysis.Analyzer]interface{})
|
||||
act.objectFacts = make(map[objectFactKey]analysis.Fact)
|
||||
act.packageFacts = make(map[packageFactKey]analysis.Fact)
|
||||
for _, dep := range act.deps {
|
||||
if dep.pkg == act.pkg {
|
||||
// Same package, different analysis (horizontal edge):
|
||||
// in-memory outputs of prerequisite analyzers
|
||||
// become inputs to this analysis pass.
|
||||
inputs[dep.a] = dep.result
|
||||
|
||||
} else if dep.a == act.a { // (always true)
|
||||
// Same analysis, different package (vertical edge):
|
||||
// serialized facts produced by prerequisite analysis
|
||||
// become available to this analysis pass.
|
||||
inheritFacts(act, dep)
|
||||
}
|
||||
}
|
||||
|
||||
// Run the analysis.
|
||||
pass := &analysis.Pass{
|
||||
Analyzer: act.a,
|
||||
Fset: act.pkg.Fset,
|
||||
Files: act.pkg.Syntax,
|
||||
OtherFiles: act.pkg.OtherFiles,
|
||||
Pkg: act.pkg.Types,
|
||||
TypesInfo: act.pkg.TypesInfo,
|
||||
ResultOf: inputs,
|
||||
Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
|
||||
ImportObjectFact: act.importObjectFact,
|
||||
ExportObjectFact: act.exportObjectFact,
|
||||
ImportPackageFact: act.importPackageFact,
|
||||
ExportPackageFact: act.exportPackageFact,
|
||||
}
|
||||
act.pass = pass
|
||||
|
||||
var err error
|
||||
if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
|
||||
err = fmt.Errorf("analysis skipped due to errors in package")
|
||||
} else {
|
||||
act.result, err = pass.Analyzer.Run(pass)
|
||||
if err == nil {
|
||||
if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want {
|
||||
err = fmt.Errorf(
|
||||
"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
|
||||
pass.Pkg.Path(), pass.Analyzer, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
act.err = err
|
||||
|
||||
// disallow calls after Run
|
||||
pass.ExportObjectFact = nil
|
||||
pass.ExportPackageFact = nil
|
||||
}
|
||||
|
||||
// inheritFacts populates act.facts with
|
||||
// those it obtains from its dependency, dep.
|
||||
func inheritFacts(act, dep *action) {
|
||||
serialize := dbg('s')
|
||||
|
||||
for key, fact := range dep.objectFacts {
|
||||
// Filter out facts related to objects
|
||||
// that are irrelevant downstream
|
||||
// (equivalently: not in the compiler export data).
|
||||
if !exportedFrom(key.obj, dep.pkg.Types) {
|
||||
if false {
|
||||
log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Optionally serialize/deserialize fact
|
||||
// to verify that it works across address spaces.
|
||||
if serialize {
|
||||
var err error
|
||||
fact, err = codeFact(fact)
|
||||
if err != nil {
|
||||
log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
|
||||
}
|
||||
}
|
||||
|
||||
if false {
|
||||
log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
|
||||
}
|
||||
act.objectFacts[key] = fact
|
||||
}
|
||||
|
||||
for key, fact := range dep.packageFacts {
|
||||
// TODO: filter out facts that belong to
|
||||
// packages not mentioned in the export data
|
||||
// to prevent side channels.
|
||||
|
||||
// Optionally serialize/deserialize fact
|
||||
// to verify that it works across address spaces
|
||||
// and is deterministic.
|
||||
if serialize {
|
||||
var err error
|
||||
fact, err = codeFact(fact)
|
||||
if err != nil {
|
||||
log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
|
||||
}
|
||||
}
|
||||
|
||||
if false {
|
||||
log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
|
||||
}
|
||||
act.packageFacts[key] = fact
|
||||
}
|
||||
}
|
||||
|
||||
// codeFact encodes then decodes a fact,
|
||||
// just to exercise that logic.
|
||||
func codeFact(fact analysis.Fact) (analysis.Fact, error) {
|
||||
// We encode facts one at a time.
|
||||
// A real modular driver would emit all facts
|
||||
// into one encoder to improve gob efficiency.
|
||||
var buf bytes.Buffer
|
||||
if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Encode it twice and assert that we get the same bits.
|
||||
// This helps detect nondeterministic Gob encoding (e.g. of maps).
|
||||
var buf2 bytes.Buffer
|
||||
if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
|
||||
return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
|
||||
}
|
||||
|
||||
new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
|
||||
if err := gob.NewDecoder(&buf).Decode(new); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return new, nil
|
||||
}
|
||||
|
||||
// exportedFrom reports whether obj may be visible to a package that imports pkg.
|
||||
// This includes not just the exported members of pkg, but also unexported
|
||||
// constants, types, fields, and methods, perhaps belonging to other packages,
|
||||
// that find their way into the API.
|
||||
// This is an overapproximation of the more accurate approach used by
|
||||
// gc export data, which walks the type graph, but it's much simpler.
|
||||
//
|
||||
// TODO(adonovan): do more accurate filtering by walking the type graph.
|
||||
func exportedFrom(obj types.Object, pkg *types.Package) bool {
|
||||
switch obj := obj.(type) {
|
||||
case *types.Func:
|
||||
return obj.Exported() && obj.Pkg() == pkg ||
|
||||
obj.Type().(*types.Signature).Recv() != nil
|
||||
case *types.Var:
|
||||
return obj.Exported() && obj.Pkg() == pkg ||
|
||||
obj.IsField()
|
||||
case *types.TypeName, *types.Const:
|
||||
return true
|
||||
}
|
||||
return false // Nil, Builtin, Label, or PkgName
|
||||
}
|
||||
|
||||
// importObjectFact implements Pass.ImportObjectFact.
|
||||
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
|
||||
// importObjectFact copies the fact value to *ptr.
|
||||
func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
|
||||
if obj == nil {
|
||||
panic("nil object")
|
||||
}
|
||||
key := objectFactKey{obj, factType(ptr)}
|
||||
if v, ok := act.objectFacts[key]; ok {
|
||||
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// exportObjectFact implements Pass.ExportObjectFact.
|
||||
func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
|
||||
if act.pass.ExportObjectFact == nil {
|
||||
log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
|
||||
}
|
||||
|
||||
if obj.Pkg() != act.pkg.Types {
|
||||
log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
|
||||
act.a, act.pkg, obj, fact)
|
||||
}
|
||||
|
||||
key := objectFactKey{obj, factType(fact)}
|
||||
act.objectFacts[key] = fact // clobber any existing entry
|
||||
if dbg('f') {
|
||||
objstr := types.ObjectString(obj, (*types.Package).Name)
|
||||
fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
|
||||
act.pkg.Fset.Position(obj.Pos()), objstr, fact)
|
||||
}
|
||||
}
|
||||
|
||||
// importPackageFact implements Pass.ImportPackageFact.
|
||||
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
|
||||
// importPackageFact copies the fact value to *ptr.
|
||||
func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
|
||||
if pkg == nil {
|
||||
panic("nil package")
|
||||
}
|
||||
key := packageFactKey{pkg, factType(ptr)}
|
||||
if v, ok := act.packageFacts[key]; ok {
|
||||
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// exportPackageFact implements Pass.ExportPackageFact.
|
||||
func (act *action) exportPackageFact(fact analysis.Fact) {
|
||||
if act.pass.ExportPackageFact == nil {
|
||||
log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
|
||||
}
|
||||
|
||||
key := packageFactKey{act.pass.Pkg, factType(fact)}
|
||||
act.packageFacts[key] = fact // clobber any existing entry
|
||||
if dbg('f') {
|
||||
fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n",
|
||||
act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
|
||||
}
|
||||
}
|
||||
|
||||
func factType(fact analysis.Fact) reflect.Type {
|
||||
t := reflect.TypeOf(fact)
|
||||
if t.Kind() != reflect.Ptr {
|
||||
log.Fatalf("invalid Fact type: got %T, want pointer", t)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 }
|
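RegisterFlags, analysisflags.Parse, and Run above are designed to be wired together by a driver's main function. The following is a rough sketch under that assumption; the helper name driverMain is made up, and the real singlechecker/multichecker drivers in x/tools differ in detail.

package main

import (
	"flag"
	"os"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/internal/analysisflags"
	"golang.org/x/tools/go/analysis/internal/checker"
)

// driverMain shows the intended call order for a multichecker-style driver.
func driverMain(analyzers []*analysis.Analyzer) {
	checker.RegisterFlags()                          // -debug, -cpuprofile, -memprofile, -trace
	analyzers = analysisflags.Parse(analyzers, true) // adds -NAME, -json, -c, then calls flag.Parse
	os.Exit(checker.Run(flag.Args(), analyzers))     // load packages, run analyses, print diagnostics
}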
299
vendor/golang.org/x/tools/go/analysis/internal/facts/facts.go
generated
vendored
Normal file
@@ -0,0 +1,299 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package facts defines a serializable set of analysis.Fact.
|
||||
//
|
||||
// It provides a partial implementation of the Fact-related parts of the
|
||||
// analysis.Pass interface for use in analysis drivers such as "go vet"
|
||||
// and other build systems.
|
||||
//
|
||||
// The serial format is unspecified and may change, so the same version
|
||||
// of this package must be used for reading and writing serialized facts.
|
||||
//
|
||||
// The handling of facts in the analysis system parallels the handling
|
||||
// of type information in the compiler: during compilation of package P,
|
||||
// the compiler emits an export data file that describes the type of
|
||||
// every object (named thing) defined in package P, plus every object
|
||||
// indirectly reachable from one of those objects. Thus the downstream
|
||||
// compiler of package Q need only load one export data file per direct
|
||||
// import of Q, and it will learn everything about the API of package P
|
||||
// and everything it needs to know about the API of P's dependencies.
|
||||
//
|
||||
// Similarly, analysis of package P emits a fact set containing facts
|
||||
// about all objects exported from P, plus additional facts about only
|
||||
// those objects of P's dependencies that are reachable from the API of
|
||||
// package P; the downstream analysis of Q need only load one fact set
|
||||
// per direct import of Q.
|
||||
//
|
||||
// The notion of "exportedness" that matters here is that of the
|
||||
// compiler. According to the language spec, a method pkg.T.f is
|
||||
// unexported simply because its name starts with lowercase. But the
|
||||
// compiler must nonetheless export f so that downstream compilations can
|
||||
// accurately ascertain whether pkg.T implements an interface pkg.I
|
||||
// defined as interface{f()}. Exported thus means "described in export
|
||||
// data".
|
||||
//
|
||||
package facts
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/go/analysis"
|
||||
"golang.org/x/tools/go/types/objectpath"
|
||||
)
|
||||
|
||||
const debug = false
|
||||
|
||||
// A Set is a set of analysis.Facts.
|
||||
//
|
||||
// Decode creates a Set of facts by reading from the imports of a given
|
||||
// package, and Encode writes out the set. Between these operations,
|
||||
// the Import and Export methods will query and update the set.
|
||||
//
|
||||
// All of Set's methods except String are safe to call concurrently.
|
||||
type Set struct {
|
||||
pkg *types.Package
|
||||
mu sync.Mutex
|
||||
m map[key]analysis.Fact
|
||||
}
|
||||
|
||||
type key struct {
|
||||
pkg *types.Package
|
||||
obj types.Object // (object facts only)
|
||||
t reflect.Type
|
||||
}
|
||||
|
||||
// ImportObjectFact implements analysis.Pass.ImportObjectFact.
|
||||
func (s *Set) ImportObjectFact(obj types.Object, ptr analysis.Fact) bool {
|
||||
if obj == nil {
|
||||
panic("nil object")
|
||||
}
|
||||
key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(ptr)}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if v, ok := s.m[key]; ok {
|
||||
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ExportObjectFact implements analysis.Pass.ExportObjectFact.
|
||||
func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) {
|
||||
if obj.Pkg() != s.pkg {
|
||||
log.Panicf("in package %s: ExportObjectFact(%s, %T): can't set fact on object belonging another package",
|
||||
s.pkg, obj, fact)
|
||||
}
|
||||
key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(fact)}
|
||||
s.mu.Lock()
|
||||
s.m[key] = fact // clobber any existing entry
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// ImportPackageFact implements analysis.Pass.ImportPackageFact.
|
||||
func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
|
||||
if pkg == nil {
|
||||
panic("nil package")
|
||||
}
|
||||
key := key{pkg: pkg, t: reflect.TypeOf(ptr)}
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if v, ok := s.m[key]; ok {
|
||||
reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ExportPackageFact implements analysis.Pass.ExportPackageFact.
|
||||
func (s *Set) ExportPackageFact(fact analysis.Fact) {
|
||||
key := key{pkg: s.pkg, t: reflect.TypeOf(fact)}
|
||||
s.mu.Lock()
|
||||
s.m[key] = fact // clobber any existing entry
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// gobFact is the Gob declaration of a serialized fact.
|
||||
type gobFact struct {
|
||||
PkgPath string // path of package
|
||||
Object objectpath.Path // optional path of object relative to package itself
|
||||
Fact analysis.Fact // type and value of user-defined Fact
|
||||
}
|
||||
|
||||
// Decode decodes all the facts relevant to the analysis of package pkg.
|
||||
// The read function reads serialized fact data from an external source
|
||||
// for one of pkg's direct imports. The empty file is a valid
|
||||
// encoding of an empty fact set.
|
||||
//
|
||||
// It is the caller's responsibility to call gob.Register on all
|
||||
// necessary fact types.
|
||||
func Decode(pkg *types.Package, read func(packagePath string) ([]byte, error)) (*Set, error) {
|
||||
// Compute the import map for this package.
|
||||
// See the package doc comment.
|
||||
packages := importMap(pkg.Imports())
|
||||
|
||||
// Read facts from imported packages.
|
||||
// Facts may describe indirectly imported packages, or their objects.
|
||||
m := make(map[key]analysis.Fact) // one big bucket
|
||||
for _, imp := range pkg.Imports() {
|
||||
logf := func(format string, args ...interface{}) {
|
||||
if debug {
|
||||
prefix := fmt.Sprintf("in %s, importing %s: ",
|
||||
pkg.Path(), imp.Path())
|
||||
log.Print(prefix, fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
// Read the gob-encoded facts.
|
||||
data, err := read(imp.Path())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("in %s, can't import facts for package %q: %v",
|
||||
pkg.Path(), imp.Path(), err)
|
||||
}
|
||||
if len(data) == 0 {
|
||||
continue // no facts
|
||||
}
|
||||
var gobFacts []gobFact
|
||||
if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&gobFacts); err != nil {
|
||||
return nil, fmt.Errorf("decoding facts for %q: %v", imp.Path(), err)
|
||||
}
|
||||
if debug {
|
||||
logf("decoded %d facts: %v", len(gobFacts), gobFacts)
|
||||
}
|
||||
|
||||
// Parse each one into a key and a Fact.
|
||||
for _, f := range gobFacts {
|
||||
factPkg := packages[f.PkgPath]
|
||||
if factPkg == nil {
|
||||
// Fact relates to a dependency that was
|
||||
// unused in this translation unit. Skip.
|
||||
logf("no package %q; discarding %v", f.PkgPath, f.Fact)
|
||||
continue
|
||||
}
|
||||
key := key{pkg: factPkg, t: reflect.TypeOf(f.Fact)}
|
||||
if f.Object != "" {
|
||||
// object fact
|
||||
obj, err := objectpath.Object(factPkg, f.Object)
|
||||
if err != nil {
|
||||
// (most likely due to unexported object)
|
||||
// TODO(adonovan): audit for other possibilities.
|
||||
logf("no object for path: %v; discarding %s", err, f.Fact)
|
||||
continue
|
||||
}
|
||||
key.obj = obj
|
||||
logf("read %T fact %s for %v", f.Fact, f.Fact, key.obj)
|
||||
} else {
|
||||
// package fact
|
||||
logf("read %T fact %s for %v", f.Fact, f.Fact, factPkg)
|
||||
}
|
||||
m[key] = f.Fact
|
||||
}
|
||||
}
|
||||
|
||||
return &Set{pkg: pkg, m: m}, nil
|
||||
}
|
||||
|
||||
// Encode encodes a set of facts to a memory buffer.
|
||||
//
|
||||
// It may fail if one of the Facts could not be gob-encoded, but this is
|
||||
// a sign of a bug in an Analyzer.
|
||||
func (s *Set) Encode() []byte {
|
||||
|
||||
// TODO(adonovan): opt: use a more efficient encoding
|
||||
// that avoids repeating PkgPath for each fact.
|
||||
|
||||
// Gather all facts, including those from imported packages.
|
||||
var gobFacts []gobFact
|
||||
|
||||
s.mu.Lock()
|
||||
for k, fact := range s.m {
|
||||
if debug {
|
||||
log.Printf("%v => %s\n", k, fact)
|
||||
}
|
||||
var object objectpath.Path
|
||||
if k.obj != nil {
|
||||
path, err := objectpath.For(k.obj)
|
||||
if err != nil {
|
||||
if debug {
|
||||
log.Printf("discarding fact %s about %s\n", fact, k.obj)
|
||||
}
|
||||
continue // object not accessible from package API; discard fact
|
||||
}
|
||||
object = path
|
||||
}
|
||||
gobFacts = append(gobFacts, gobFact{
|
||||
PkgPath: k.pkg.Path(),
|
||||
Object: object,
|
||||
Fact: fact,
|
||||
})
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
// Sort facts by (package, object, type) for determinism.
|
||||
sort.Slice(gobFacts, func(i, j int) bool {
|
||||
x, y := gobFacts[i], gobFacts[j]
|
||||
if x.PkgPath != y.PkgPath {
|
||||
return x.PkgPath < y.PkgPath
|
||||
}
|
||||
if x.Object != y.Object {
|
||||
return x.Object < y.Object
|
||||
}
|
||||
tx := reflect.TypeOf(x.Fact)
|
||||
ty := reflect.TypeOf(y.Fact)
|
||||
if tx != ty {
|
||||
return tx.String() < ty.String()
|
||||
}
|
||||
return false // equal
|
||||
})
|
||||
|
||||
var buf bytes.Buffer
|
||||
if len(gobFacts) > 0 {
|
||||
if err := gob.NewEncoder(&buf).Encode(gobFacts); err != nil {
|
||||
// Fact encoding should never fail. Identify the culprit.
|
||||
for _, gf := range gobFacts {
|
||||
if err := gob.NewEncoder(ioutil.Discard).Encode(gf); err != nil {
|
||||
fact := gf.Fact
|
||||
pkgpath := reflect.TypeOf(fact).Elem().PkgPath()
|
||||
log.Panicf("internal error: gob encoding of analysis fact %s failed: %v; please report a bug against fact %T in package %q",
|
||||
fact, err, fact, pkgpath)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if debug {
|
||||
log.Printf("package %q: encode %d facts, %d bytes\n",
|
||||
s.pkg.Path(), len(gobFacts), buf.Len())
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
// String is provided only for debugging, and must not be called
|
||||
// concurrent with any Import/Export method.
|
||||
func (s *Set) String() string {
|
||||
var buf bytes.Buffer
|
||||
buf.WriteString("{")
|
||||
for k, f := range s.m {
|
||||
if buf.Len() > 1 {
|
||||
buf.WriteString(", ")
|
||||
}
|
||||
if k.obj != nil {
|
||||
buf.WriteString(k.obj.String())
|
||||
} else {
|
||||
buf.WriteString(k.pkg.Path())
|
||||
}
|
||||
fmt.Fprintf(&buf, ": %v", f)
|
||||
}
|
||||
buf.WriteString("}")
|
||||
return buf.String()
|
||||
}
|
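The Decode → Export/Import → Encode lifecycle described in the package comment, summarised as a sketch. The fact type myUnusedFact, the package name driver, and the read callback are illustrative assumptions; the test file that follows exercises the real API end to end.

package driver

import (
	"encoding/gob"
	"go/types"

	"golang.org/x/tools/go/analysis/internal/facts"
)

// myUnusedFact is a hypothetical fact type for illustration.
type myUnusedFact struct{ Reason string }

func (*myUnusedFact) AFact() {}

// analyzeUnit shows the intended call order for one analysis unit.
func analyzeUnit(pkg *types.Package, read func(path string) ([]byte, error)) ([]byte, error) {
	gob.Register(new(myUnusedFact)) // required for every concrete Fact type

	set, err := facts.Decode(pkg, read) // facts of pkg's direct imports
	if err != nil {
		return nil, err
	}

	// ... run analyzers here, using set.ImportObjectFact / set.ExportObjectFact ...

	return set.Encode(), nil // serialized facts for downstream units
}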
174
vendor/golang.org/x/tools/go/analysis/internal/facts/facts_test.go
generated
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package facts_test
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/tools/go/analysis/analysistest"
|
||||
"golang.org/x/tools/go/analysis/internal/facts"
|
||||
"golang.org/x/tools/go/packages"
|
||||
)
|
||||
|
||||
type myFact struct {
|
||||
S string
|
||||
}
|
||||
|
||||
func (f *myFact) String() string { return fmt.Sprintf("myFact(%s)", f.S) }
|
||||
func (f *myFact) AFact() {}
|
||||
|
||||
func TestEncodeDecode(t *testing.T) {
|
||||
gob.Register(new(myFact))
|
||||
|
||||
// c -> b -> a, a2
|
||||
// c does not directly depend on a, but it indirectly uses a.T.
|
||||
//
|
||||
// Package a2 is never loaded directly so it is incomplete.
|
||||
//
|
||||
// We use only types in this example because we rely on
|
||||
// types.Eval to resolve the lookup expressions, and it only
|
||||
// works for types. This is a definite gap in the typechecker API.
|
||||
files := map[string]string{
|
||||
"a/a.go": `package a; type A int; type T int`,
|
||||
"a2/a.go": `package a2; type A2 int; type Unneeded int`,
|
||||
"b/b.go": `package b; import ("a"; "a2"); type B chan a2.A2; type F func() a.T`,
|
||||
"c/c.go": `package c; import "b"; type C []b.B`,
|
||||
}
|
||||
dir, cleanup, err := analysistest.WriteFiles(files)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
// factmap represents the passing of encoded facts from one
|
||||
// package to another. In practice one would use the file system.
|
||||
factmap := make(map[string][]byte)
|
||||
read := func(path string) ([]byte, error) { return factmap[path], nil }
|
||||
|
||||
// In the following table, we analyze packages (a, b, c) in order,
|
||||
// look up various objects accessible within each package,
|
||||
// and see if they have a fact. The "analysis" exports a fact
|
||||
// for every object at package level.
|
||||
//
|
||||
// Note: Loop iterations are not independent test cases;
|
||||
// order matters, as we populate factmap.
|
||||
type lookups []struct {
|
||||
objexpr string
|
||||
want string
|
||||
}
|
||||
for _, test := range []struct {
|
||||
path string
|
||||
lookups lookups
|
||||
}{
|
||||
{"a", lookups{
|
||||
{"A", "myFact(a.A)"},
|
||||
}},
|
||||
{"b", lookups{
|
||||
{"a.A", "myFact(a.A)"},
|
||||
{"a.T", "myFact(a.T)"},
|
||||
{"B", "myFact(b.B)"},
|
||||
{"F", "myFact(b.F)"},
|
||||
{"F(nil)()", "myFact(a.T)"}, // (result type of b.F)
|
||||
}},
|
||||
{"c", lookups{
|
||||
{"b.B", "myFact(b.B)"},
|
||||
{"b.F", "myFact(b.F)"},
|
||||
//{"b.F(nil)()", "myFact(a.T)"}, // no fact; TODO(adonovan): investigate
|
||||
{"C", "myFact(c.C)"},
|
||||
{"C{}[0]", "myFact(b.B)"},
|
||||
{"<-(C{}[0])", "no fact"}, // object but no fact (we never "analyze" a2)
|
||||
}},
|
||||
} {
|
||||
// load package
|
||||
pkg, err := load(dir, test.path)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// decode
|
||||
facts, err := facts.Decode(pkg, read)
|
||||
if err != nil {
|
||||
t.Fatalf("Decode failed: %v", err)
|
||||
}
|
||||
if true {
|
||||
t.Logf("decode %s facts = %v", pkg.Path(), facts) // show all facts
|
||||
}
|
||||
|
||||
// export
|
||||
// (one fact for each package-level object)
|
||||
scope := pkg.Scope()
|
||||
for _, name := range scope.Names() {
|
||||
obj := scope.Lookup(name)
|
||||
fact := &myFact{obj.Pkg().Name() + "." + obj.Name()}
|
||||
facts.ExportObjectFact(obj, fact)
|
||||
}
|
||||
|
||||
// import
|
||||
// (after export, because an analyzer may import its own facts)
|
||||
for _, lookup := range test.lookups {
|
||||
fact := new(myFact)
|
||||
var got string
|
||||
if obj := find(pkg, lookup.objexpr); obj == nil {
|
||||
got = "no object"
|
||||
} else if facts.ImportObjectFact(obj, fact) {
|
||||
got = fact.String()
|
||||
} else {
|
||||
got = "no fact"
|
||||
}
|
||||
if got != lookup.want {
|
||||
t.Errorf("in %s, ImportObjectFact(%s, %T) = %s, want %s",
|
||||
pkg.Path(), lookup.objexpr, fact, got, lookup.want)
|
||||
}
|
||||
}
|
||||
|
||||
// encode
|
||||
factmap[pkg.Path()] = facts.Encode()
|
||||
}
|
||||
}
|
||||
|
||||
func find(p *types.Package, expr string) types.Object {
|
||||
// types.Eval only allows us to compute a TypeName object for an expression.
|
||||
// TODO(adonovan): support other expressions that denote an object:
|
||||
// - an identifier (or qualified ident) for a func, const, or var
|
||||
// - new(T).f for a field or method
|
||||
// I've added CheckExpr in https://go-review.googlesource.com/c/go/+/144677.
|
||||
// If that becomes available, use it.
|
||||
|
||||
// Choose an arbitrary position within the (single-file) package
|
||||
// so that we are within the scope of its import declarations.
|
||||
somepos := p.Scope().Lookup(p.Scope().Names()[0]).Pos()
|
||||
tv, err := types.Eval(token.NewFileSet(), p, somepos, expr)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if n, ok := tv.Type.(*types.Named); ok {
|
||||
return n.Obj()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func load(dir string, path string) (*types.Package, error) {
|
||||
cfg := &packages.Config{
|
||||
Mode: packages.LoadSyntax,
|
||||
Dir: dir,
|
||||
Env: append(os.Environ(), "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off"),
|
||||
}
|
||||
pkgs, err := packages.Load(cfg, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if packages.PrintErrors(pkgs) > 0 {
|
||||
return nil, fmt.Errorf("packages had errors")
|
||||
}
|
||||
if len(pkgs) == 0 {
|
||||
return nil, fmt.Errorf("no package matched %s", path)
|
||||
}
|
||||
return pkgs[0].Types, nil
|
||||
}
|
88
vendor/golang.org/x/tools/go/analysis/internal/facts/imports.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package facts
|
||||
|
||||
import "go/types"
|
||||
|
||||
// importMap computes the import map for a package by traversing the
|
||||
// entire exported API of each of its imports.
|
||||
//
|
||||
// This is a workaround for the fact that we cannot access the map used
|
||||
// internally by the types.Importer returned by go/importer. The entries
|
||||
// in this map are the packages and objects that may be relevant to the
|
||||
// current analysis unit.
|
||||
//
|
||||
// Packages in the map that are only indirectly imported may be
|
||||
// incomplete (!pkg.Complete()).
|
||||
//
|
||||
func importMap(imports []*types.Package) map[string]*types.Package {
|
||||
objects := make(map[types.Object]bool)
|
||||
packages := make(map[string]*types.Package)
|
||||
|
||||
var addObj func(obj types.Object) bool
|
||||
var addType func(T types.Type)
|
||||
|
||||
addObj = func(obj types.Object) bool {
|
||||
if !objects[obj] {
|
||||
objects[obj] = true
|
||||
addType(obj.Type())
|
||||
if pkg := obj.Pkg(); pkg != nil {
|
||||
packages[pkg.Path()] = pkg
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
addType = func(T types.Type) {
|
||||
switch T := T.(type) {
|
||||
case *types.Basic:
|
||||
// nop
|
||||
case *types.Named:
|
||||
if addObj(T.Obj()) {
|
||||
for i := 0; i < T.NumMethods(); i++ {
|
||||
addObj(T.Method(i))
|
||||
}
|
||||
}
|
||||
case *types.Pointer:
|
||||
addType(T.Elem())
|
||||
case *types.Slice:
|
||||
addType(T.Elem())
|
||||
case *types.Array:
|
||||
addType(T.Elem())
|
||||
case *types.Chan:
|
||||
addType(T.Elem())
|
||||
case *types.Map:
|
||||
addType(T.Key())
|
||||
addType(T.Elem())
|
||||
case *types.Signature:
|
||||
addType(T.Params())
|
||||
addType(T.Results())
|
||||
case *types.Struct:
|
||||
for i := 0; i < T.NumFields(); i++ {
|
||||
addObj(T.Field(i))
|
||||
}
|
||||
case *types.Tuple:
|
||||
for i := 0; i < T.Len(); i++ {
|
||||
addObj(T.At(i))
|
||||
}
|
||||
case *types.Interface:
|
||||
for i := 0; i < T.NumMethods(); i++ {
|
||||
addObj(T.Method(i))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, imp := range imports {
|
||||
packages[imp.Path()] = imp
|
||||
|
||||
scope := imp.Scope()
|
||||
for _, name := range scope.Names() {
|
||||
addObj(scope.Lookup(name))
|
||||
}
|
||||
}
|
||||
|
||||
return packages
|
||||
}