Update vendored dependencies

This commit is contained in:
Christian Huffman
2020-07-23 15:19:22 -04:00
parent 547e88e4fb
commit bda8f8c0ae
909 changed files with 119096 additions and 130549 deletions

View File

@@ -16,6 +16,7 @@ import (
"os"
"path/filepath"
"strings"
"time"
"golang.org/x/tools/internal/fastwalk"
)
@@ -83,8 +84,9 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string)
}
return
}
start := time.Now()
if opts.Debug {
log.Printf("scanning %s", root.Path)
log.Printf("gopathwalk: scanning %s", root.Path)
}
w := &walker{
root: root,
@@ -98,7 +100,7 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string)
}
if opts.Debug {
log.Printf("scanned %s", root.Path)
log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
}
}
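For context, a minimal standalone sketch of how a caller would drive this walker with Debug enabled and see the new timing output. gopathwalk is internal to x/tools, so this only compiles from inside that module, and the callback body is purely illustrative.

package main

import (
	"go/build"
	"log"

	"golang.org/x/tools/internal/gopathwalk"
)

func main() {
	// Roots derived from the build context's GOROOT/GOPATH src directories.
	roots := gopathwalk.SrcDirsRoots(&build.Default)
	add := func(root gopathwalk.Root, dir string) {
		log.Printf("found package dir %s under %s", dir, root.Path)
	}
	// With Debug set, each root now logs "gopathwalk: scanning <root>" and
	// "gopathwalk: scanned <root> in <elapsed>" around the walk.
	gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: true, ModulesEnabled: false})
}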

View File

@@ -17,6 +17,7 @@ import (
"os/exec"
"path"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
@@ -475,9 +476,9 @@ func (p *pass) assumeSiblingImportsValid() {
}
for left, rights := range refs {
if imp, ok := importsByName[left]; ok {
if _, ok := stdlib[imp.ImportPath]; ok {
if m, ok := stdlib[imp.ImportPath]; ok {
// We have the stdlib in memory; no need to guess.
rights = stdlib[imp.ImportPath]
rights = copyExports(m)
}
p.addCandidate(imp, &packageInfo{
// no name; we already know it.
@@ -584,29 +585,133 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
return fixes, nil
}
// getCandidatePkgs returns the list of pkgs that are accessible from filename,
// optionally filtered to only packages named pkgName.
func getCandidatePkgs(pkgName, filename string, env *ProcessEnv) ([]*pkg, error) {
// TODO(heschi): filter out current package. (Don't forget x_test can import x.)
var result []*pkg
// Start off with the standard library.
for importPath := range stdlib {
if pkgName != "" && path.Base(importPath) != pkgName {
continue
}
result = append(result, &pkg{
dir: filepath.Join(env.GOROOT, "src", importPath),
importPathShort: importPath,
packageName: path.Base(importPath),
relevance: 0,
})
}
// Exclude goroot results -- getting them is relatively expensive, not cached,
// and generally redundant with the in-memory version.
exclude := []gopathwalk.RootType{gopathwalk.RootGOROOT}
// Only the go/packages resolver uses the first argument, and nobody uses that resolver.
scannedPkgs, err := env.GetResolver().scan(nil, true, exclude)
if err != nil {
return nil, err
}
dupCheck := map[string]struct{}{}
for _, pkg := range scannedPkgs {
if pkgName != "" && pkg.packageName != pkgName {
continue
}
if !canUse(filename, pkg.dir) {
continue
}
if _, ok := dupCheck[pkg.importPathShort]; ok {
continue
}
dupCheck[pkg.importPathShort] = struct{}{}
result = append(result, pkg)
}
// Sort first by relevance, then by package name, with import path as a tiebreaker.
sort.Slice(result, func(i, j int) bool {
pi, pj := result[i], result[j]
if pi.relevance != pj.relevance {
return pi.relevance < pj.relevance
}
if pi.packageName != pj.packageName {
return pi.packageName < pj.packageName
}
return pi.importPathShort < pj.importPathShort
})
return result, nil
}
func candidateImportName(pkg *pkg) string {
if importPathToAssumedName(pkg.importPathShort) != pkg.packageName {
return pkg.packageName
}
return ""
}
// getAllCandidates gets all of the candidates to be imported, regardless of if they are needed.
func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) {
// TODO(suzmue): scan for additional candidates and filter out
// current package.
// Get the stdlib candidates and sort by import path.
var paths []string
for importPath := range stdlib {
paths = append(paths, importPath)
pkgs, err := getCandidatePkgs("", filename, env)
if err != nil {
return nil, err
}
sort.Strings(paths)
var imports []ImportFix
for _, importPath := range paths {
imports = append(imports, ImportFix{
result := make([]ImportFix, 0, len(pkgs))
for _, pkg := range pkgs {
result = append(result, ImportFix{
StmtInfo: ImportInfo{
ImportPath: importPath,
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: path.Base(importPath),
IdentName: pkg.packageName,
FixType: AddImport,
})
}
return imports, nil
return result, nil
}
// A PackageExport is a package and its exports.
type PackageExport struct {
Fix *ImportFix
Exports []string
}
func getPackageExports(completePackage, filename string, env *ProcessEnv) ([]PackageExport, error) {
pkgs, err := getCandidatePkgs(completePackage, filename, env)
if err != nil {
return nil, err
}
results := make([]PackageExport, 0, len(pkgs))
for _, pkg := range pkgs {
fix := &ImportFix{
StmtInfo: ImportInfo{
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: pkg.packageName,
FixType: AddImport,
}
var exports []string
if e, ok := stdlib[pkg.importPathShort]; ok {
exports = e
} else {
exports, err = loadExportsForPackage(context.Background(), env, completePackage, pkg)
if err != nil {
if env.Debug {
env.Logf("while completing %q, error loading exports from %q: %v", completePackage, pkg.importPathShort, err)
}
continue
}
}
sort.Strings(exports)
results = append(results, PackageExport{
Fix: fix,
Exports: exports,
})
}
return results, nil
}
// ProcessEnv contains environment variables and settings that affect the use of
@@ -678,6 +783,13 @@ func (e *ProcessEnv) buildContext() *build.Context {
ctx := build.Default
ctx.GOROOT = e.GOROOT
ctx.GOPATH = e.GOPATH
// As of Go 1.14, build.Context has a WorkingDir field
// (see golang.org/issue/34860).
// Populate it only if present.
if wd := reflect.ValueOf(&ctx).Elem().FieldByName("WorkingDir"); wd.IsValid() && wd.Kind() == reflect.String {
wd.SetString(e.WorkingDir)
}
return &ctx
}
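A small self-contained reproduction of the reflection technique used above, runnable on any Go version: the field is set only when the running toolchain's build.Context actually declares it. The field name mirrors the vendored code, and the example path is made up.

package main

import (
	"fmt"
	"go/build"
	"reflect"
)

func main() {
	ctx := build.Default
	// Look the field up by name so this still builds on Go versions whose
	// build.Context does not declare it.
	if f := reflect.ValueOf(&ctx).Elem().FieldByName("WorkingDir"); f.IsValid() && f.Kind() == reflect.String {
		f.SetString("/tmp/project")
		fmt.Println("WorkingDir populated")
	} else {
		fmt.Println("this build.Context has no WorkingDir field")
	}
}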
@@ -712,9 +824,10 @@ func cmdDebugStr(cmd *exec.Cmd) string {
func addStdlibCandidates(pass *pass, refs references) {
add := func(pkg string) {
exports := copyExports(stdlib[pkg])
pass.addCandidate(
&ImportInfo{ImportPath: pkg},
&packageInfo{name: path.Base(pkg), exports: stdlib[pkg]})
&packageInfo{name: path.Base(pkg), exports: exports})
}
for left := range refs {
if left == "rand" {
@@ -735,12 +848,15 @@ func addStdlibCandidates(pass *pass, refs references) {
type Resolver interface {
// loadPackageNames loads the package names in importPaths.
loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
// scan finds (at least) the packages satisfying refs. The returned slice is unordered.
scan(refs references) ([]*pkg, error)
// scan finds (at least) the packages satisfying refs. If loadNames is true,
// package names will be set on the results, and dirs whose package name
// could not be determined will be excluded.
scan(refs references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error)
// loadExports returns the set of exported symbols in the package at dir.
// It returns an error if the package name in dir does not match expectPackage.
// loadExports may be called concurrently.
loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error)
loadExports(ctx context.Context, pkg *pkg) (string, []string, error)
ClearForNewScan()
}
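A sketch of how a caller exercises the widened interface: requesting names up front, excluding GOROOT, then reading the package name that loadExports now reports. It is only a sketch; Resolver, pkg, and references are unexported, so real code like this would have to live inside the imports package itself.

func collectExports(ctx context.Context, r Resolver) (map[string][]string, error) {
	// Ask the resolver to load package names during the scan, and skip GOROOT:
	// the standard library is already known in memory.
	pkgs, err := r.scan(nil, true, []gopathwalk.RootType{gopathwalk.RootGOROOT})
	if err != nil {
		return nil, err
	}
	byName := map[string][]string{}
	for _, p := range pkgs {
		// loadExports now returns the package name alongside its exported symbols.
		name, exports, err := r.loadExports(ctx, p)
		if err != nil {
			continue
		}
		byName[name] = append(byName[name], exports...)
	}
	return byName, nil
}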
// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages.
@@ -748,6 +864,8 @@ type goPackagesResolver struct {
env *ProcessEnv
}
func (r *goPackagesResolver) ClearForNewScan() {}
func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
if len(importPaths) == 0 {
return nil, nil
@@ -772,7 +890,7 @@ func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir strin
}
func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) {
func (r *goPackagesResolver) scan(refs references, _ bool, _ []gopathwalk.RootType) ([]*pkg, error) {
var loadQueries []string
for pkgName := range refs {
loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName)
@@ -790,33 +908,34 @@ func (r *goPackagesResolver) scan(refs references) ([]*pkg, error) {
dir: filepath.Dir(goPackage.CompiledGoFiles[0]),
importPathShort: VendorlessPath(goPackage.PkgPath),
goPackage: goPackage,
packageName: goPackage.Name,
})
}
return scan, nil
}
func (r *goPackagesResolver) loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) {
func (r *goPackagesResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
if pkg.goPackage == nil {
return nil, fmt.Errorf("goPackage not set")
return "", nil, fmt.Errorf("goPackage not set")
}
exports := map[string]bool{}
var exports []string
fset := token.NewFileSet()
for _, fname := range pkg.goPackage.CompiledGoFiles {
f, err := parser.ParseFile(fset, fname, nil, 0)
if err != nil {
return nil, fmt.Errorf("parsing %s: %v", fname, err)
return "", nil, fmt.Errorf("parsing %s: %v", fname, err)
}
for name := range f.Scope.Objects {
if ast.IsExported(name) {
exports[name] = true
exports = append(exports, name)
}
}
}
return exports, nil
return pkg.goPackage.Name, exports, nil
}
func addExternalCandidates(pass *pass, refs references, filename string) error {
dirScan, err := pass.env.GetResolver().scan(refs)
dirScan, err := pass.env.GetResolver().scan(refs, false, nil)
if err != nil {
return err
}
@@ -914,10 +1033,24 @@ func importPathToAssumedName(importPath string) string {
// gopathResolver implements resolver for GOPATH workspaces.
type gopathResolver struct {
env *ProcessEnv
env *ProcessEnv
cache *dirInfoCache
}
func (r *gopathResolver) init() {
if r.cache == nil {
r.cache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
}
}
}
func (r *gopathResolver) ClearForNewScan() {
r.cache = nil
}
func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
r.init()
names := map[string]string{}
for _, path := range importPaths {
names[path] = importPathToName(r.env, path, srcDir)
@@ -1000,6 +1133,8 @@ type pkg struct {
goPackage *packages.Package
dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http")
importPathShort string // vendorless import path ("net/http", "a/b")
packageName string // package name loaded from source if requested
relevance int // a weakly-defined score of how relevant a package is. 0 is most relevant.
}
type pkgDistance struct {
@@ -1043,32 +1178,74 @@ func distance(basepath, targetpath string) int {
return strings.Count(p, string(filepath.Separator)) + 1
}
func (r *gopathResolver) scan(_ references) ([]*pkg, error) {
dupCheck := make(map[string]bool)
var result []*pkg
var mu sync.Mutex
func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) {
r.init()
add := func(root gopathwalk.Root, dir string) {
mu.Lock()
defer mu.Unlock()
if _, dup := dupCheck[dir]; dup {
// We assume cached directories have not changed. We can skip them and their
// children.
if _, ok := r.cache.Load(dir); ok {
return
}
dupCheck[dir] = true
importpath := filepath.ToSlash(dir[len(root.Path)+len("/"):])
result = append(result, &pkg{
importPathShort: VendorlessPath(importpath),
dir: dir,
})
info := directoryPackageInfo{
status: directoryScanned,
dir: dir,
rootType: root.Type,
nonCanonicalImportPath: VendorlessPath(importpath),
}
r.cache.Store(dir, info)
}
roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), exclude)
gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false})
var result []*pkg
for _, dir := range r.cache.Keys() {
info, ok := r.cache.Load(dir)
if !ok {
continue
}
if loadNames {
var err error
info, err = r.cache.CachePackageName(info)
if err != nil {
continue
}
}
p := &pkg{
importPathShort: info.nonCanonicalImportPath,
dir: dir,
relevance: 1,
packageName: info.packageName,
}
if info.rootType == gopathwalk.RootGOROOT {
p.relevance = 0
}
result = append(result, p)
}
gopathwalk.Walk(gopathwalk.SrcDirsRoots(r.env.buildContext()), add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false})
return result, nil
}
func (r *gopathResolver) loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) {
return loadExportsFromFiles(ctx, r.env, expectPackage, pkg.dir)
func filterRoots(roots []gopathwalk.Root, exclude []gopathwalk.RootType) []gopathwalk.Root {
var result []gopathwalk.Root
outer:
for _, root := range roots {
for _, i := range exclude {
if i == root.Type {
continue outer
}
}
result = append(result, root)
}
return result
}
func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
r.init()
if info, ok := r.cache.Load(pkg.dir); ok {
return r.cache.CacheExports(ctx, r.env, info)
}
return loadExportsFromFiles(ctx, r.env, pkg.dir)
}
// VendorlessPath returns the devendorized version of the import path ipath.
@@ -1084,13 +1261,13 @@ func VendorlessPath(ipath string) string {
return ipath
}
func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, expectPackage string, dir string) (map[string]bool, error) {
exports := make(map[string]bool)
func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (string, []string, error) {
var exports []string
// Look for non-test, buildable .go files which could provide exports.
all, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
return "", nil, err
}
var files []os.FileInfo
for _, fi := range all {
@@ -1106,47 +1283,42 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, expectPackage st
}
if len(files) == 0 {
return nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir)
return "", nil, fmt.Errorf("dir %v contains no buildable, non-test .go files", dir)
}
var pkgName string
fset := token.NewFileSet()
for _, fi := range files {
select {
case <-ctx.Done():
return nil, ctx.Err()
return "", nil, ctx.Err()
default:
}
fullFile := filepath.Join(dir, fi.Name())
f, err := parser.ParseFile(fset, fullFile, nil, 0)
if err != nil {
return nil, fmt.Errorf("parsing %s: %v", fullFile, err)
return "", nil, fmt.Errorf("parsing %s: %v", fullFile, err)
}
pkgName := f.Name.Name
if pkgName == "documentation" {
if f.Name.Name == "documentation" {
// Special case from go/build.ImportDir, not
// handled by MatchFile above.
continue
}
if pkgName != expectPackage {
return nil, fmt.Errorf("scan of dir %v is not expected package %v (actually %v)", dir, expectPackage, pkgName)
}
pkgName = f.Name.Name
for name := range f.Scope.Objects {
if ast.IsExported(name) {
exports[name] = true
exports = append(exports, name)
}
}
}
if env.Debug {
exportList := make([]string, 0, len(exports))
for k := range exports {
exportList = append(exportList, k)
}
sort.Strings(exportList)
env.Logf("loaded exports in dir %v (package %v): %v", dir, expectPackage, strings.Join(exportList, ", "))
sortedExports := append([]string(nil), exports...)
sort.Strings(sortedExports)
env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", "))
}
return exports, nil
return pkgName, exports, nil
}
// findImport searches for a package with the given symbols.
@@ -1221,7 +1393,7 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string,
if pass.env.Debug {
pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
}
exports, err := pass.env.GetResolver().loadExports(ctx, pkgName, c.pkg)
exports, err := loadExportsForPackage(ctx, pass.env, pkgName, c.pkg)
if err != nil {
if pass.env.Debug {
pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
@@ -1230,10 +1402,15 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string,
return
}
exportsMap := make(map[string]bool, len(exports))
for _, sym := range exports {
exportsMap[sym] = true
}
// If it doesn't have the right
// symbols, send nil to mean no match.
for symbol := range symbols {
if !exports[symbol] {
if !exportsMap[symbol] {
resc <- nil
return
}
@@ -1253,6 +1430,17 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string,
return nil, nil
}
func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg string, pkg *pkg) ([]string, error) {
pkgName, exports, err := env.GetResolver().loadExports(ctx, pkg)
if err != nil {
return nil, err
}
if expectPkg != pkgName {
return nil, fmt.Errorf("dir %v is package %v, wanted %v", pkg.dir, pkgName, expectPkg)
}
return exports, err
}
// pkgIsCandidate reports whether pkg is a candidate for satisfying the
// finding which package pkgIdent in the file named by filename is trying
// to refer to.
@@ -1383,3 +1571,11 @@ type visitFn func(node ast.Node) ast.Visitor
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
return fn(node)
}
func copyExports(pkg []string) map[string]bool {
m := make(map[string]bool, len(pkg))
for _, v := range pkg {
m[v] = true
}
return m
}

View File

@@ -105,13 +105,22 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (
// GetAllCandidates gets all of the standard library candidate packages to import in
// sorted order on import path.
func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) {
_, opt, err = initialize(filename, []byte{}, opt)
_, opt, err = initialize(filename, nil, opt)
if err != nil {
return nil, err
}
return getAllCandidates(filename, opt.Env)
}
// GetPackageExports returns all known packages with name pkg and their exports.
func GetPackageExports(pkg, filename string, opt *Options) (exports []PackageExport, err error) {
_, opt, err = initialize(filename, nil, opt)
if err != nil {
return nil, err
}
return getPackageExports(pkg, filename, opt.Env)
}
// initialize sets the values for opt and src.
// If they are provided, they are not changed. Otherwise opt is set to the
// default values and src is read from the file system.
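A hedged usage sketch of the exported entry points above. Passing a nil *Options works because initialize falls back to defaults, as described; the file path and package name are placeholders, and since this package lives under x/tools/internal, code outside that module cannot actually import it.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/internal/imports"
)

func main() {
	// Every importable package named "template" reachable from this file,
	// together with its exported identifiers.
	exports, err := imports.GetPackageExports("template", "/home/me/proj/main.go", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, pe := range exports {
		fmt.Println(pe.Fix.StmtInfo.ImportPath, pe.Exports)
	}
}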

View File

@@ -14,10 +14,10 @@ import (
"strconv"
"strings"
"sync"
"time"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/module"
"golang.org/x/tools/internal/semver"
)
// ModuleResolver implements resolver for modules using the go command as little
@@ -25,38 +25,81 @@ import (
type ModuleResolver struct {
env *ProcessEnv
moduleCacheDir string
dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
Initialized bool
Main *ModuleJSON
ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
ModsByDir []*ModuleJSON // ...or Dir.
// moduleCacheInfo stores information about the module cache.
moduleCacheInfo *moduleCacheInfo
// moduleCacheCache stores information about the module cache.
moduleCacheCache *dirInfoCache
otherCache *dirInfoCache
}
type ModuleJSON struct {
Path string // module path
Version string // module version
Versions []string // available module versions (with -versions)
Replace *ModuleJSON // replaced by this module
Time *time.Time // time version was created
Update *ModuleJSON // available update, if any (with -u)
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file for this module, if any
Error *ModuleErrorJSON // error loading module
}
type ModuleErrorJSON struct {
Err string // the error itself
Path string // module path
Replace *ModuleJSON // replaced by this module
Main bool // is this the main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file for this module, if any
GoVersion string // go version used in module
}
func (r *ModuleResolver) init() error {
if r.Initialized {
return nil
}
mainMod, vendorEnabled, err := vendorEnabled(r.env)
if err != nil {
return err
}
if mainMod != nil && vendorEnabled {
// Vendor mode is on, so all the non-Main modules are irrelevant,
// and we need to search /vendor for everything.
r.Main = mainMod
r.dummyVendorMod = &ModuleJSON{
Path: "",
Dir: filepath.Join(mainMod.Dir, "vendor"),
}
r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod}
r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod}
} else {
// Vendor mode is off, so run go list -m ... to find everything.
r.initAllMods()
}
r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod")
sort.Slice(r.ModsByModPath, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.ModsByModPath[x].Path, "/")
}
return count(j) < count(i) // descending order
})
sort.Slice(r.ModsByDir, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.ModsByDir[x].Dir, "/")
}
return count(j) < count(i) // descending order
})
if r.moduleCacheCache == nil {
r.moduleCacheCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
}
}
if r.otherCache == nil {
r.otherCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
}
}
r.Initialized = true
return nil
}
func (r *ModuleResolver) initAllMods() error {
stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
if err != nil {
return err
@@ -79,30 +122,23 @@ func (r *ModuleResolver) init() error {
r.Main = mod
}
}
sort.Slice(r.ModsByModPath, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.ModsByModPath[x].Path, "/")
}
return count(j) < count(i) // descending order
})
sort.Slice(r.ModsByDir, func(i, j int) bool {
count := func(x int) int {
return strings.Count(r.ModsByDir[x].Dir, "/")
}
return count(j) < count(i) // descending order
})
if r.moduleCacheInfo == nil {
r.moduleCacheInfo = &moduleCacheInfo{
modCacheDirInfo: make(map[string]*directoryPackageInfo),
}
}
r.Initialized = true
return nil
}
func (r *ModuleResolver) ClearForNewScan() {
r.otherCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
}
}
func (r *ModuleResolver) ClearForNewMod() {
env := r.env
*r = ModuleResolver{
env: env,
}
r.init()
}
// findPackage returns the module and directory that contains the package at
// the given import path, or returns nil, "" if no module is in scope.
func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
@@ -118,22 +154,31 @@ func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
continue
}
if info, ok := r.moduleCacheInfo.Load(pkgDir); ok {
if packageScanned, err := info.reachedStatus(directoryScanned); packageScanned {
if info, ok := r.cacheLoad(pkgDir); ok {
if loaded, err := info.reachedStatus(nameLoaded); loaded {
if err != nil {
// There was some error with scanning this directory.
// It does not contain a valid package.
continue
continue // No package in this dir.
}
return m, pkgDir
}
if scanned, err := info.reachedStatus(directoryScanned); scanned && err != nil {
continue // Dir is unreadable, etc.
}
// This is slightly wrong: a directory doesn't have to have an
// importable package to count as a package for package-to-module
// resolution. package main or _test files should count but
// don't.
// TODO(heschi): fix this.
if _, err := r.cachePackageName(info); err == nil {
return m, pkgDir
}
}
// Not cached. Read the filesystem.
pkgFiles, err := ioutil.ReadDir(pkgDir)
if err != nil {
continue
}
// A module only contains a package if it has buildable go
// files in that directory. If not, it could be provided by an
// outer module. See #29736.
@@ -146,6 +191,40 @@ func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
return nil, ""
}
func (r *ModuleResolver) cacheLoad(dir string) (directoryPackageInfo, bool) {
if info, ok := r.moduleCacheCache.Load(dir); ok {
return info, ok
}
return r.otherCache.Load(dir)
}
func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
if info.rootType == gopathwalk.RootModuleCache {
r.moduleCacheCache.Store(info.dir, info)
} else {
r.otherCache.Store(info.dir, info)
}
}
func (r *ModuleResolver) cacheKeys() []string {
return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...)
}
// cachePackageName caches the package name for a dir already in the cache.
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) {
if info.rootType == gopathwalk.RootModuleCache {
return r.moduleCacheCache.CachePackageName(info)
}
return r.otherCache.CachePackageName(info)
}
func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
if info.rootType == gopathwalk.RootModuleCache {
return r.moduleCacheCache.CacheExports(ctx, env, info)
}
return r.otherCache.CacheExports(ctx, env, info)
}
// findModuleByDir returns the module that contains dir, or nil if no such
// module is in scope.
func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
@@ -184,28 +263,45 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *ModuleJSON) bool {
// so it cannot be a nested module.
return false
}
mf := r.findModFile(dir)
if mf == "" {
if mod != nil && mod == r.dummyVendorMod {
// The /vendor pseudomodule is flattened and doesn't actually count.
return false
}
return filepath.Dir(mf) != mod.Dir
modDir, _ := r.modInfo(dir)
if modDir == "" {
return false
}
return modDir != mod.Dir
}
func (r *ModuleResolver) findModFile(dir string) string {
func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) {
readModName := func(modFile string) string {
modBytes, err := ioutil.ReadFile(modFile)
if err != nil {
return ""
}
return modulePath(modBytes)
}
if r.dirInModuleCache(dir) {
matches := modCacheRegexp.FindStringSubmatch(dir)
index := strings.Index(dir, matches[1]+"@"+matches[2])
return filepath.Join(dir[:index], matches[1]+"@"+matches[2], "go.mod")
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
return modDir, readModName(filepath.Join(modDir, "go.mod"))
}
for {
if info, ok := r.cacheLoad(dir); ok {
return info.moduleDir, info.moduleName
}
f := filepath.Join(dir, "go.mod")
info, err := os.Stat(f)
if err == nil && !info.IsDir() {
return f
return dir, readModName(f)
}
d := filepath.Dir(dir)
if len(d) >= len(dir) {
return "" // reached top of file system, no go.mod
return "", "" // reached top of file system, no go.mod
}
dir = d
}
@@ -237,7 +333,7 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (
return names, nil
}
func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) {
if err := r.init(); err != nil {
return nil, err
}
@@ -249,35 +345,30 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
if r.Main != nil {
roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule})
}
if r.moduleCacheDir == "" {
r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod")
}
roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
// Walk replace targets, just in case they're not in any of the above.
for _, mod := range r.ModsByModPath {
if mod.Replace != nil {
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
if r.dummyVendorMod != nil {
roots = append(roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
} else {
roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
// Walk replace targets, just in case they're not in any of the above.
for _, mod := range r.ModsByModPath {
if mod.Replace != nil {
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
}
}
}
roots = filterRoots(roots, exclude)
var result []*pkg
dupCheck := make(map[string]bool)
var mu sync.Mutex
// Packages in the module cache are immutable. If we have
// already seen this package on a previous scan of the module
// cache, return that result.
// We assume cached directories have not changed. We can skip them and their
// children.
skip := func(root gopathwalk.Root, dir string) bool {
mu.Lock()
defer mu.Unlock()
// If we have already processed this directory on this walk, skip it.
if _, dup := dupCheck[dir]; dup {
return true
}
// If we have saved this directory information, skip it.
info, ok := r.moduleCacheInfo.Load(dir)
info, ok := r.cacheLoad(dir)
if !ok {
return false
}
@@ -288,44 +379,19 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
return packageScanned
}
// Add anything new to the cache. We'll process everything in it below.
add := func(root gopathwalk.Root, dir string) {
mu.Lock()
defer mu.Unlock()
if _, dup := dupCheck[dir]; dup {
return
}
info, err := r.scanDirForPackage(root, dir)
if err != nil {
return
}
if root.Type == gopathwalk.RootModuleCache {
// Save this package information in the cache and return.
// Packages from the module cache are added after Walk.
r.moduleCacheInfo.Store(dir, info)
return
}
// Skip this package if there was an error loading package info.
if info.err != nil {
return
}
// The rest of this function canonicalizes the packages using the results
// of initializing the resolver from 'go list -m'.
res, err := r.canonicalize(root.Type, info.nonCanonicalImportPath, info.dir, info.needsReplace)
if err != nil {
return
}
result = append(result, res)
r.cacheStore(r.scanDirForPackage(root, dir))
}
gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true})
// Add the packages from the modules in the mod cache that were skipped.
for _, dir := range r.moduleCacheInfo.Keys() {
info, ok := r.moduleCacheInfo.Load(dir)
// Everything we already had, and everything new, is now in the cache.
for _, dir := range r.cacheKeys() {
info, ok := r.cacheLoad(dir)
if !ok {
continue
}
@@ -335,7 +401,15 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
continue
}
res, err := r.canonicalize(gopathwalk.RootModuleCache, info.nonCanonicalImportPath, info.dir, info.needsReplace)
// If we want package names, make sure the cache has them.
if loadNames {
var err error
if info, err = r.cachePackageName(info); err != nil {
continue
}
}
res, err := r.canonicalize(info)
if err != nil {
continue
}
@@ -347,60 +421,70 @@ func (r *ModuleResolver) scan(_ references) ([]*pkg, error) {
// canonicalize gets the result of canonicalizing the packages using the results
// of initializing the resolver from 'go list -m'.
func (r *ModuleResolver) canonicalize(rootType gopathwalk.RootType, importPath, dir string, needsReplace bool) (res *pkg, err error) {
func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
// Packages in GOROOT are already canonical, regardless of the std/cmd modules.
if rootType == gopathwalk.RootGOROOT {
if info.rootType == gopathwalk.RootGOROOT {
return &pkg{
importPathShort: importPath,
dir: dir,
importPathShort: info.nonCanonicalImportPath,
dir: info.dir,
packageName: path.Base(info.nonCanonicalImportPath),
relevance: 0,
}, nil
}
importPath := info.nonCanonicalImportPath
relevance := 2
// Check if the directory is underneath a module that's in scope.
if mod := r.findModuleByDir(dir); mod != nil {
if mod := r.findModuleByDir(info.dir); mod != nil {
relevance = 1
// It is. If dir is the target of a replace directive,
// our guessed import path is wrong. Use the real one.
if mod.Dir == dir {
if mod.Dir == info.dir {
importPath = mod.Path
} else {
dirInMod := dir[len(mod.Dir)+len("/"):]
dirInMod := info.dir[len(mod.Dir)+len("/"):]
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
}
} else if needsReplace {
return nil, fmt.Errorf("needed this package to be in scope: %s", dir)
} else if info.needsReplace {
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir)
}
res := &pkg{
importPathShort: importPath,
dir: info.dir,
packageName: info.packageName, // may not be populated if the caller didn't ask for it
relevance: relevance,
}
// We may have discovered a package that has a different version
// in scope already. Canonicalize to that one if possible.
if _, canonicalDir := r.findPackage(importPath); canonicalDir != "" {
dir = canonicalDir
res.dir = canonicalDir
}
return &pkg{
importPathShort: VendorlessPath(importPath),
dir: dir,
}, nil
return res, nil
}
func (r *ModuleResolver) loadExports(ctx context.Context, expectPackage string, pkg *pkg) (map[string]bool, error) {
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
if err := r.init(); err != nil {
return nil, err
return "", nil, err
}
return loadExportsFromFiles(ctx, r.env, expectPackage, pkg.dir)
if info, ok := r.cacheLoad(pkg.dir); ok {
return r.cacheExports(ctx, r.env, info)
}
return loadExportsFromFiles(ctx, r.env, pkg.dir)
}
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) (directoryPackageInfo, error) {
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
}
importPath := filepath.ToSlash(subdir)
if strings.HasPrefix(importPath, "vendor/") {
// Ignore vendor dirs. If -mod=vendor is on, then things
// should mostly just work, but when it's not vendor/
// is a mess. There's no easy way to tell if it's on.
// We can still find things in the mod cache and
// map them into /vendor when -mod=vendor is on.
return directoryPackageInfo{}, fmt.Errorf("vendor directory")
// Only enter vendor directories if they're explicitly requested as a root.
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("unwanted vendor directory"),
}
}
switch root.Type {
case gopathwalk.RootCurrentModule:
@@ -411,7 +495,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) (di
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("invalid module cache path: %v", subdir),
}, nil
}
}
modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
if err != nil {
@@ -421,35 +505,34 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) (di
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
}, nil
}
}
importPath = path.Join(modPath, filepath.ToSlash(matches[3]))
case gopathwalk.RootGOROOT:
importPath = subdir
}
modDir, modName := r.modInfo(dir)
result := directoryPackageInfo{
status: directoryScanned,
dir: dir,
rootType: root.Type,
nonCanonicalImportPath: importPath,
needsReplace: false,
moduleDir: modDir,
moduleName: modName,
}
if root.Type == gopathwalk.RootGOROOT {
// stdlib packages are always in scope, despite the confusing go.mod
return result, nil
return result
}
// Check that this package is not obviously impossible to import.
modFile := r.findModFile(dir)
modBytes, err := ioutil.ReadFile(modFile)
if err == nil && !strings.HasPrefix(importPath, modulePath(modBytes)) {
if !strings.HasPrefix(importPath, modName) {
// The module's declared path does not match
// its expected path. It probably needs a
// replace directive we don't have.
result.needsReplace = true
}
return result, nil
return result
}
// modCacheRegexp splits a path in a module cache into module, module version, and package.
@@ -498,3 +581,63 @@ func modulePath(mod []byte) string {
}
return "" // missing module path
}
var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
// vendorEnabled indicates if vendoring is enabled.
// Inspired by setDefaultBuildMod in modload/init.go
func vendorEnabled(env *ProcessEnv) (*ModuleJSON, bool, error) {
mainMod, go114, err := getMainModuleAnd114(env)
if err != nil {
return nil, false, err
}
matches := modFlagRegexp.FindStringSubmatch(env.GOFLAGS)
var modFlag string
if len(matches) != 0 {
modFlag = matches[1]
}
if modFlag != "" {
// Don't override an explicit '-mod=' argument.
return mainMod, modFlag == "vendor", nil
}
if mainMod == nil || !go114 {
return mainMod, false, nil
}
// Check 1.14's automatic vendor mode.
if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
// The Go version is at least 1.14, and a vendor directory exists.
// Set -mod=vendor by default.
return mainMod, true, nil
}
}
return mainMod, false, nil
}
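A standalone reproduction of the GOFLAGS check above, showing what modFlagRegexp extracts for a few representative values. The regexp is copied verbatim; the sample flag strings are made up.

package main

import (
	"fmt"
	"regexp"
)

var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)

func main() {
	for _, goflags := range []string{"", "-count=1 -mod=vendor", "-mod mod"} {
		if m := modFlagRegexp.FindStringSubmatch(goflags); len(m) != 0 {
			fmt.Printf("GOFLAGS=%q -> explicit -mod=%s\n", goflags, m[1])
		} else {
			fmt.Printf("GOFLAGS=%q -> no explicit -mod flag\n", goflags)
		}
	}
}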
// getMainModuleAnd114 gets the main module's information and whether the
// go command in use is 1.14+. This is the information needed to figure out
// if vendoring should be enabled.
func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) {
const format = `{{.Path}}
{{.Dir}}
{{.GoMod}}
{{.GoVersion}}
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
`
stdout, err := env.invokeGo("list", "-m", "-f", format)
if err != nil {
return nil, false, nil
}
lines := strings.Split(stdout.String(), "\n")
if len(lines) < 5 {
return nil, false, fmt.Errorf("unexpected stdout: %q", stdout)
}
mod := &ModuleJSON{
Path: lines[0],
Dir: lines[1],
GoMod: lines[2],
GoVersion: lines[3],
Main: true,
}
return mod, lines[4] == "go1.14", nil
}

View File

@@ -1,12 +1,13 @@
package imports
import (
"context"
"fmt"
"sync"
"golang.org/x/tools/internal/gopathwalk"
)
// ModuleResolver implements Resolver for modules using the go command as little
// as feasible.
//
// To find packages to import, the resolver needs to know about all of
// the packages that could be imported. This includes packages that are
// already in modules that are in (1) the current module, (2) replace targets,
@@ -30,6 +31,8 @@ type directoryPackageStatus int
const (
_ directoryPackageStatus = iota
directoryScanned
nameLoaded
exportsLoaded
)
type directoryPackageInfo struct {
@@ -38,17 +41,30 @@ type directoryPackageInfo struct {
// err is non-nil when there was an error trying to reach status.
err error
// Set when status > directoryScanned.
// Set when status >= directoryScanned.
// dir is the absolute directory of this package.
dir string
// nonCanonicalImportPath is the expected import path for this package.
// This may not be an import path that can be used to import this package.
dir string
rootType gopathwalk.RootType
// nonCanonicalImportPath is the package's expected import path. It may
// not actually be importable at that path.
nonCanonicalImportPath string
// needsReplace is true if the nonCanonicalImportPath does not match the
// the modules declared path, making it impossible to import without a
// module's declared path, making it impossible to import without a
// replace directive.
needsReplace bool
// Module-related information.
moduleDir string // The directory that is the module root of this dir.
moduleName string // The module name that contains this dir.
// Set when status >= nameLoaded.
packageName string // the package name, as declared in the source.
// Set when status >= exportsLoaded.
exports []string
}
// reachedStatus returns true when info has a status at least target and any error associated with
@@ -63,8 +79,8 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
return true, nil
}
// moduleCacheInfo is a concurrency safe map for storing information about
// the directories in the module cache.
// dirInfoCache is a concurrency safe map for storing information about
// directories that may contain packages.
//
// The information in this cache is built incrementally. Entries are initialized in scan.
// No new keys should be added in any other functions, as all directories containing
@@ -73,37 +89,30 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
// Other functions, including loadExports and findPackage, may update entries in this cache
// as they discover new things about the directory.
//
// We do not need to protect the data in the cache for multiple writes, because it only stores
// module cache directories, which do not change. If two competing stores take place, there will be
// one store that wins. Although this could result in a loss of information it will not be incorrect
// and may just result in recomputing the same result later.
// The information in the cache is not expected to change for the cache's
// lifetime, so there is no protection against competing writes. Users should
// take care not to hold the cache across changes to the underlying files.
//
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
type moduleCacheInfo struct {
type dirInfoCache struct {
mu sync.Mutex
// modCacheDirInfo stores information about packages in
// module cache directories. Keyed by absolute directory.
modCacheDirInfo map[string]*directoryPackageInfo
// dirs stores information about packages in directories, keyed by absolute path.
dirs map[string]*directoryPackageInfo
}
// Store stores the package info for dir.
func (d *moduleCacheInfo) Store(dir string, info directoryPackageInfo) {
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
d.mu.Lock()
defer d.mu.Unlock()
d.modCacheDirInfo[dir] = &directoryPackageInfo{
status: info.status,
err: info.err,
dir: info.dir,
nonCanonicalImportPath: info.nonCanonicalImportPath,
needsReplace: info.needsReplace,
}
stored := info // defensive copy
d.dirs[dir] = &stored
}
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
func (d *moduleCacheInfo) Load(dir string) (directoryPackageInfo, bool) {
func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
d.mu.Lock()
defer d.mu.Unlock()
info, ok := d.modCacheDirInfo[dir]
info, ok := d.dirs[dir]
if !ok {
return directoryPackageInfo{}, false
}
@@ -111,11 +120,46 @@ func (d *moduleCacheInfo) Load(dir string) (directoryPackageInfo, bool) {
}
// Keys returns the keys currently present in d.
func (d *moduleCacheInfo) Keys() (keys []string) {
func (d *dirInfoCache) Keys() (keys []string) {
d.mu.Lock()
defer d.mu.Unlock()
for key := range d.modCacheDirInfo {
for key := range d.dirs {
keys = append(keys, key)
}
return keys
}
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) {
if loaded, err := info.reachedStatus(nameLoaded); loaded {
return info, err
}
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
return info, fmt.Errorf("cannot read package name, scan error: %v", err)
}
info.packageName, info.err = packageDirToName(info.dir)
info.status = nameLoaded
d.Store(info.dir, info)
return info, info.err
}
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
if reached, _ := info.reachedStatus(exportsLoaded); reached {
return info.packageName, info.exports, info.err
}
if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
return "", nil, err
}
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir)
if info.err == context.Canceled {
return info.packageName, info.exports, info.err
}
// The cache structure wants things to proceed linearly. We can skip a
// step here, but only if we succeed.
if info.status == nameLoaded || info.err == nil {
info.status = exportsLoaded
} else {
info.status = nameLoaded
}
d.Store(info.dir, info)
return info.packageName, info.exports, info.err
}
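To make the status ladder concrete, a sketch of the intended flow: the walker stores directoryScanned entries, and later callers upgrade an entry to nameLoaded and then exportsLoaded on demand. dirInfoCache and the related types are unexported, so this could only live inside the imports package; the env and dir values are placeholders.

func exampleCacheFlow(ctx context.Context, env *ProcessEnv, dir string) {
	cache := &dirInfoCache{dirs: map[string]*directoryPackageInfo{}}

	// 1. During the walk: record that dir was seen.
	cache.Store(dir, directoryPackageInfo{status: directoryScanned, dir: dir})

	// 2. A caller that needs the package name upgrades the entry to nameLoaded.
	if info, ok := cache.Load(dir); ok {
		if info, err := cache.CachePackageName(info); err == nil {
			// 3. A caller that needs symbols upgrades it further to exportsLoaded.
			cache.CacheExports(ctx, env, info)
		}
	}
}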

File diff suppressed because it is too large.

vendor/golang.org/x/tools/internal/span/parse.go
View File

@@ -0,0 +1,100 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package span
import (
"strconv"
"strings"
"unicode/utf8"
)
// Parse returns the location represented by the input.
// All inputs are valid locations, as they can always be a pure filename.
// The returned span will be normalized, and thus if printed may produce a
// different string.
func Parse(input string) Span {
// :0:0#0-0:0#0
valid := input
var hold, offset int
hadCol := false
suf := rstripSuffix(input)
if suf.sep == "#" {
offset = suf.num
suf = rstripSuffix(suf.remains)
}
if suf.sep == ":" {
valid = suf.remains
hold = suf.num
hadCol = true
suf = rstripSuffix(suf.remains)
}
switch {
case suf.sep == ":":
return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), Point{})
case suf.sep == "-":
// we have a span, fall out of the case to continue
default:
// separator not valid, rewind to either the : or the start
return New(NewURI(valid), NewPoint(hold, 0, offset), Point{})
}
// only the span form can get here
// at this point we still don't know what the numbers we have mean
// if have not yet seen a : then we might have either a line or a column depending
// on whether start has a column or not
// we build an end point and will fix it later if needed
end := NewPoint(suf.num, hold, offset)
hold, offset = 0, 0
suf = rstripSuffix(suf.remains)
if suf.sep == "#" {
offset = suf.num
suf = rstripSuffix(suf.remains)
}
if suf.sep != ":" {
// turns out we don't have a span after all, rewind
return New(NewURI(valid), end, Point{})
}
valid = suf.remains
hold = suf.num
suf = rstripSuffix(suf.remains)
if suf.sep != ":" {
// line#offset only
return New(NewURI(valid), NewPoint(hold, 0, offset), end)
}
// we have a column, so if end only had one number, it is also the column
if !hadCol {
end = NewPoint(suf.num, end.v.Line, end.v.Offset)
}
return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), end)
}
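A hedged usage sketch of Parse on a few typical location strings; the span package is internal to x/tools, so this only builds from inside that module, and the exact printed form depends on Span.Format in span.go.

package main

import (
	"fmt"

	"golang.org/x/tools/internal/span"
)

func main() {
	for _, in := range []string{
		"foo.go",           // bare filename
		"foo.go:10:5",      // line and column
		"foo.go:10:5-12:8", // a range
		"foo.go:#35-#40",   // byte offsets
	} {
		s := span.Parse(in)
		fmt.Printf("%-20s -> %+v\n", in, s)
	}
}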
type suffix struct {
remains string
sep string
num int
}
func rstripSuffix(input string) suffix {
if len(input) == 0 {
return suffix{"", "", -1}
}
remains := input
num := -1
// first see if we have a number at the end
last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' })
if last >= 0 && last < len(remains)-1 {
number, err := strconv.ParseInt(remains[last+1:], 10, 64)
if err == nil {
num = int(number)
remains = remains[:last+1]
}
}
// now see if we have a trailing separator
r, w := utf8.DecodeLastRuneInString(remains)
if r != ':' && r != '#' && r == '#' {
return suffix{input, "", -1}
}
remains = remains[:len(remains)-w]
return suffix{remains, string(r), num}
}

vendor/golang.org/x/tools/internal/span/span.go
View File

@@ -0,0 +1,285 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package span contains support for representing positions and ranges in
// text files.
package span
import (
"encoding/json"
"fmt"
"path"
)
// Span represents a source code range in standardized form.
type Span struct {
v span
}
// Point represents a single point within a file.
// In general this should only be used as part of a Span, as on its own it
// does not carry enough information.
type Point struct {
v point
}
type span struct {
URI URI `json:"uri"`
Start point `json:"start"`
End point `json:"end"`
}
type point struct {
Line int `json:"line"`
Column int `json:"column"`
Offset int `json:"offset"`
}
// Invalid is a span that reports false from IsValid
var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}}
var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}}
// Converter is the interface to an object that can convert between line:column
// and offset forms for a single file.
type Converter interface {
//ToPosition converts from an offset to a line:column pair.
ToPosition(offset int) (int, int, error)
//ToOffset converts from a line:column pair to an offset.
ToOffset(line, col int) (int, error)
}
func New(uri URI, start Point, end Point) Span {
s := Span{v: span{URI: uri, Start: start.v, End: end.v}}
s.v.clean()
return s
}
func NewPoint(line, col, offset int) Point {
p := Point{v: point{Line: line, Column: col, Offset: offset}}
p.v.clean()
return p
}
func Compare(a, b Span) int {
if r := CompareURI(a.URI(), b.URI()); r != 0 {
return r
}
if r := comparePoint(a.v.Start, b.v.Start); r != 0 {
return r
}
return comparePoint(a.v.End, b.v.End)
}
func ComparePoint(a, b Point) int {
return comparePoint(a.v, b.v)
}
func comparePoint(a, b point) int {
if !a.hasPosition() {
if a.Offset < b.Offset {
return -1
}
if a.Offset > b.Offset {
return 1
}
return 0
}
if a.Line < b.Line {
return -1
}
if a.Line > b.Line {
return 1
}
if a.Column < b.Column {
return -1
}
if a.Column > b.Column {
return 1
}
return 0
}
func (s Span) HasPosition() bool { return s.v.Start.hasPosition() }
func (s Span) HasOffset() bool { return s.v.Start.hasOffset() }
func (s Span) IsValid() bool { return s.v.Start.isValid() }
func (s Span) IsPoint() bool { return s.v.Start == s.v.End }
func (s Span) URI() URI { return s.v.URI }
func (s Span) Start() Point { return Point{s.v.Start} }
func (s Span) End() Point { return Point{s.v.End} }
func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) }
func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) }
func (p Point) HasPosition() bool { return p.v.hasPosition() }
func (p Point) HasOffset() bool { return p.v.hasOffset() }
func (p Point) IsValid() bool { return p.v.isValid() }
func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) }
func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) }
func (p Point) Line() int {
if !p.v.hasPosition() {
panic(fmt.Errorf("position not set in %v", p.v))
}
return p.v.Line
}
func (p Point) Column() int {
if !p.v.hasPosition() {
panic(fmt.Errorf("position not set in %v", p.v))
}
return p.v.Column
}
func (p Point) Offset() int {
if !p.v.hasOffset() {
panic(fmt.Errorf("offset not set in %v", p.v))
}
return p.v.Offset
}
func (p point) hasPosition() bool { return p.Line > 0 }
func (p point) hasOffset() bool { return p.Offset >= 0 }
func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() }
func (p point) isZero() bool {
return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0)
}
func (s *span) clean() {
//this presumes the points are already clean
if !s.End.isValid() || (s.End == point{}) {
s.End = s.Start
}
}
func (p *point) clean() {
if p.Line < 0 {
p.Line = 0
}
if p.Column <= 0 {
if p.Line > 0 {
p.Column = 1
} else {
p.Column = 0
}
}
if p.Offset == 0 && (p.Line > 1 || p.Column > 1) {
p.Offset = -1
}
}
// Format implements fmt.Formatter to print the Location in a standard form.
// The format produced is one that can be read back in using Parse.
func (s Span) Format(f fmt.State, c rune) {
fullForm := f.Flag('+')
preferOffset := f.Flag('#')
// we should always have a uri, simplify if it is file format
//TODO: make sure the end of the uri is unambiguous
uri := string(s.v.URI)
if c == 'f' {
uri = path.Base(uri)
} else if !fullForm {
uri = s.v.URI.Filename()
}
fmt.Fprint(f, uri)
if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) {
return
}
// see which bits of start to write
printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition())
printLine := s.HasPosition() && (fullForm || !printOffset)
printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1))
fmt.Fprint(f, ":")
if printLine {
fmt.Fprintf(f, "%d", s.v.Start.Line)
}
if printColumn {
fmt.Fprintf(f, ":%d", s.v.Start.Column)
}
if printOffset {
fmt.Fprintf(f, "#%d", s.v.Start.Offset)
}
// start is written, do we need end?
if s.IsPoint() {
return
}
// we don't print the line if it did not change
printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line)
fmt.Fprint(f, "-")
if printLine {
fmt.Fprintf(f, "%d", s.v.End.Line)
}
if printColumn {
if printLine {
fmt.Fprint(f, ":")
}
fmt.Fprintf(f, "%d", s.v.End.Column)
}
if printOffset {
fmt.Fprintf(f, "#%d", s.v.End.Offset)
}
}
func (s Span) WithPosition(c Converter) (Span, error) {
if err := s.update(c, true, false); err != nil {
return Span{}, err
}
return s, nil
}
func (s Span) WithOffset(c Converter) (Span, error) {
if err := s.update(c, false, true); err != nil {
return Span{}, err
}
return s, nil
}
func (s Span) WithAll(c Converter) (Span, error) {
if err := s.update(c, true, true); err != nil {
return Span{}, err
}
return s, nil
}
func (s *Span) update(c Converter, withPos, withOffset bool) error {
if !s.IsValid() {
return fmt.Errorf("cannot add information to an invalid span")
}
if withPos && !s.HasPosition() {
if err := s.v.Start.updatePosition(c); err != nil {
return err
}
if s.v.End.Offset == s.v.Start.Offset {
s.v.End = s.v.Start
} else if err := s.v.End.updatePosition(c); err != nil {
return err
}
}
if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) {
if err := s.v.Start.updateOffset(c); err != nil {
return err
}
if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column {
s.v.End.Offset = s.v.Start.Offset
} else if err := s.v.End.updateOffset(c); err != nil {
return err
}
}
return nil
}
func (p *point) updatePosition(c Converter) error {
line, col, err := c.ToPosition(p.Offset)
if err != nil {
return err
}
p.Line = line
p.Column = col
return nil
}
func (p *point) updateOffset(c Converter) error {
offset, err := c.ToOffset(p.Line, p.Column)
if err != nil {
return err
}
p.Offset = offset
return nil
}

vendor/golang.org/x/tools/internal/span/token.go
View File

@@ -0,0 +1,151 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package span
import (
"fmt"
"go/token"
)
// Range represents a source code range in token.Pos form.
// It also carries the FileSet that produced the positions, so that it is
// self contained.
type Range struct {
FileSet *token.FileSet
Start token.Pos
End token.Pos
}
// TokenConverter is a Converter backed by a token file set and file.
// It uses the file set methods to work out the conversions, which
// makes it fast and does not require the file contents.
type TokenConverter struct {
fset *token.FileSet
file *token.File
}
// NewRange creates a new Range from a FileSet and two positions.
// To represent a point pass a 0 as the end pos.
func NewRange(fset *token.FileSet, start, end token.Pos) Range {
return Range{
FileSet: fset,
Start: start,
End: end,
}
}
// NewTokenConverter returns an implementation of Converter backed by a
// token.File.
func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter {
return &TokenConverter{fset: fset, file: f}
}
// NewContentConverter returns an implementation of Converter for the
// given file content.
func NewContentConverter(filename string, content []byte) *TokenConverter {
fset := token.NewFileSet()
f := fset.AddFile(filename, -1, len(content))
f.SetLinesForContent(content)
return &TokenConverter{fset: fset, file: f}
}
// IsPoint returns true if the range represents a single point.
func (r Range) IsPoint() bool {
return r.Start == r.End
}
// Span converts a Range to a Span that represents the Range.
// It will fill in all the members of the Span, calculating the line and column
// information.
func (r Range) Span() (Span, error) {
f := r.FileSet.File(r.Start)
if f == nil {
return Span{}, fmt.Errorf("file not found in FileSet")
}
s := Span{v: span{URI: FileURI(f.Name())}}
var err error
s.v.Start.Offset, err = offset(f, r.Start)
if err != nil {
return Span{}, err
}
if r.End.IsValid() {
s.v.End.Offset, err = offset(f, r.End)
if err != nil {
return Span{}, err
}
}
s.v.Start.clean()
s.v.End.clean()
s.v.clean()
converter := NewTokenConverter(r.FileSet, f)
return s.WithPosition(converter)
}
// offset is a copy of the Offset function in go/token, but with the adjustment
// that it does not panic on invalid positions.
func offset(f *token.File, pos token.Pos) (int, error) {
if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() {
return 0, fmt.Errorf("invalid pos")
}
return int(pos) - f.Base(), nil
}
// Range converts a Span to a Range that represents the Span for the supplied
// File.
func (s Span) Range(converter *TokenConverter) (Range, error) {
s, err := s.WithOffset(converter)
if err != nil {
return Range{}, err
}
// go/token will panic if the offset is larger than the file's size,
// so check here to avoid panicking.
if s.Start().Offset() > converter.file.Size() {
return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size())
}
if s.End().Offset() > converter.file.Size() {
return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size())
}
return Range{
FileSet: converter.fset,
Start: converter.file.Pos(s.Start().Offset()),
End: converter.file.Pos(s.End().Offset()),
}, nil
}
func (l *TokenConverter) ToPosition(offset int) (int, int, error) {
if offset > l.file.Size() {
return 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, l.file.Size())
}
pos := l.file.Pos(offset)
p := l.fset.Position(pos)
if offset == l.file.Size() {
return p.Line + 1, 1, nil
}
return p.Line, p.Column, nil
}
func (l *TokenConverter) ToOffset(line, col int) (int, error) {
if line < 0 {
return -1, fmt.Errorf("line is not valid")
}
lineMax := l.file.LineCount() + 1
if line > lineMax {
return -1, fmt.Errorf("line is beyond end of file %v", lineMax)
} else if line == lineMax {
if col > 1 {
return -1, fmt.Errorf("column is beyond end of file")
}
// at the end of the file, allowing for a trailing eol
return l.file.Size(), nil
}
pos := lineStart(l.file, line)
if !pos.IsValid() {
return -1, fmt.Errorf("line is not in file")
}
// we assume that column is in bytes here, and that the first byte of a
// line is at column 1
pos += token.Pos(col - 1)
return offset(l.file, pos)
}
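// exampleToOffset is a minimal usage sketch, not part of the upstream
// package: it converts a 1-based line/column pair back to the byte offset it
// names, using hypothetical content.
func exampleToOffset() (int, error) {
	content := []byte("package main\n\nfunc main() {}\n")
	conv := NewContentConverter("example.go", content)
	// Line 3, column 1 is the 'f' of "func", i.e. byte offset 14.
	return conv.ToOffset(3, 1)
}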

39
vendor/golang.org/x/tools/internal/span/token111.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.12

package span
import (
"go/token"
)
// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go
// versions <= 1.11, we borrow logic from the analysisutil package.
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
// Use binary search to find the start offset of this line.
min := 0 // inclusive
max := f.Size() // exclusive
for {
offset := (min + max) / 2
pos := f.Pos(offset)
posn := f.Position(pos)
if posn.Line == line {
return pos - (token.Pos(posn.Column) - 1)
}
if min+1 >= max {
return token.NoPos
}
if posn.Line < line {
min = offset
} else {
max = offset
}
}
}
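// exampleLineStart is a minimal sketch, not part of the upstream package: on
// a pre-Go 1.12 toolchain the binary search above should land on the first
// byte of the requested line, here the 'f' of "func" at byte offset 14. The
// file name and content are hypothetical.
func exampleLineStart() token.Pos {
	fset := token.NewFileSet()
	content := []byte("package main\n\nfunc main() {}\n")
	f := fset.AddFile("example.go", -1, len(content))
	f.SetLinesForContent(content)
	return lineStart(f, 3)
}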

16
vendor/golang.org/x/tools/internal/span/token112.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.12

package span
import (
"go/token"
)
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
return f.LineStart(line)
}

152
vendor/golang.org/x/tools/internal/span/uri.go generated vendored Normal file
View File

@@ -0,0 +1,152 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package span
import (
"fmt"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"unicode"
)
const fileScheme = "file"
// URI represents the full URI for a file.
type URI string
// Filename returns the file path for the given URI.
// It is an error to call this on a URI that is not a valid filename.
func (uri URI) Filename() string {
filename, err := filename(uri)
if err != nil {
panic(err)
}
return filepath.FromSlash(filename)
}
func filename(uri URI) (string, error) {
if uri == "" {
return "", nil
}
u, err := url.ParseRequestURI(string(uri))
if err != nil {
return "", err
}
if u.Scheme != fileScheme {
return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri)
}
if isWindowsDriveURI(u.Path) {
u.Path = u.Path[1:]
}
return u.Path, nil
}
// NewURI returns a span URI for the string.
// It will attempt to detect whether the string is a file path or a URI.
func NewURI(s string) URI {
if u, err := url.PathUnescape(s); err == nil {
s = u
}
if strings.HasPrefix(s, fileScheme+"://") {
return URI(s)
}
return FileURI(s)
}
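// exampleNewURI is a minimal usage sketch, not part of the upstream package:
// on a Unix-like host both calls below should yield the same URI, since
// strings that already carry the file scheme pass through unchanged and bare
// paths fall back to FileURI. The path is hypothetical.
func exampleNewURI() bool {
	fromPath := NewURI("/tmp/example.go")
	fromURI := NewURI("file:///tmp/example.go")
	return fromPath == fromURI // expected to be true
}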
func CompareURI(a, b URI) int {
if equalURI(a, b) {
return 0
}
if a < b {
return -1
}
return 1
}
func equalURI(a, b URI) bool {
if a == b {
return true
}
// Only URIs with the same basename can name the same file, so check that
// cheaply before consulting the file system.
if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) {
return false
}
fa, err := filename(a)
if err != nil {
return false
}
fb, err := filename(b)
if err != nil {
return false
}
// Stat the files to check if they are equal.
infoa, err := os.Stat(filepath.FromSlash(fa))
if err != nil {
return false
}
infob, err := os.Stat(filepath.FromSlash(fb))
if err != nil {
return false
}
return os.SameFile(infoa, infob)
}
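// exampleCompareURI is a minimal usage sketch, not part of the upstream
// package: URIs with different basenames can never name the same file, so
// the comparison falls through to plain string ordering. The paths are
// hypothetical.
func exampleCompareURI() int {
	a := FileURI("/tmp/a.go")
	b := FileURI("/tmp/b.go")
	return CompareURI(a, b) // -1, since a sorts before b
}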
// FileURI returns a span URI for the supplied file path.
// It will always have the file scheme.
func FileURI(path string) URI {
if path == "" {
return ""
}
// Handle standard library paths that contain the literal "$GOROOT".
// TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
const prefix = "$GOROOT"
if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
suffix := path[len(prefix):]
path = runtime.GOROOT() + suffix
}
if !isWindowsDrivePath(path) {
if abs, err := filepath.Abs(path); err == nil {
path = abs
}
}
// Check the file path again, in case it became absolute.
if isWindowsDrivePath(path) {
path = "/" + path
}
path = filepath.ToSlash(path)
u := url.URL{
Scheme: fileScheme,
Path: path,
}
uri := u.String()
if unescaped, err := url.PathUnescape(uri); err == nil {
uri = unescaped
}
return URI(uri)
}
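// exampleWindowsFileURI is a minimal usage sketch, not part of the upstream
// package: a Windows-style drive path (hypothetical here) picks up a leading
// slash, producing file:///C:/Users/me/example.go.
func exampleWindowsFileURI() URI {
	return FileURI("C:/Users/me/example.go")
}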
// isWindowsDrivePath returns true if the file path is of the form used by
// Windows. We check if the path begins with a drive letter, followed by a ":".
func isWindowsDrivePath(path string) bool {
if len(path) < 4 {
return false
}
return unicode.IsLetter(rune(path[0])) && path[1] == ':'
}
// isWindowsDriveURI returns true if the file URI is of the format used by
// Windows URIs. The url.Parse package does not specially handle Windows paths
// (see https://golang.org/issue/6027). We check if the URI path has
// a drive prefix (e.g. "/C:"). If so, we trim the leading "/".
func isWindowsDriveURI(uri string) bool {
if len(uri) < 4 {
return false
}
return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
}

94
vendor/golang.org/x/tools/internal/span/utf16.go generated vendored Normal file
View File

@@ -0,0 +1,94 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package span
import (
"fmt"
"unicode/utf16"
"unicode/utf8"
)
// ToUTF16Column calculates the utf16 column expressed by the point given the
// supplied file contents.
// This is used to convert from the native (always in bytes) column
// representation to the utf16 counts used by some editors.
func ToUTF16Column(p Point, content []byte) (int, error) {
if content == nil {
return -1, fmt.Errorf("ToUTF16Column: missing content")
}
if !p.HasPosition() {
return -1, fmt.Errorf("ToUTF16Column: point is missing position")
}
if !p.HasOffset() {
return -1, fmt.Errorf("ToUTF16Column: point is missing offset")
}
offset := p.Offset() // 0-based
colZero := p.Column() - 1 // 0-based
if colZero == 0 {
// 0-based column 0, so it must be chr 1
return 1, nil
} else if colZero < 0 {
return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero)
}
// work out the offset at the start of the line using the column
lineOffset := offset - colZero
if lineOffset < 0 || offset > len(content) {
return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content))
}
// Use the offset to pick out the line start.
// This cannot panic: the check above guarantees lineOffset >= 0 and
// offset <= len(content), so both slice bounds below are valid.
start := content[lineOffset:]
// Now, truncate down to the supplied column.
start = start[:colZero]
// and count the number of utf16 characters
// in theory we could do this by hand more efficiently...
return len(utf16.Encode([]rune(string(start)))) + 1, nil
}
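// exampleToUTF16Column is a minimal usage sketch, not part of the upstream
// package, assuming the package's NewPoint constructor: the closing quote
// below sits at byte column 11, but because the emoji occupies four bytes and
// two UTF-16 code units, the reported UTF-16 column is 9.
func exampleToUTF16Column() (int, error) {
	content := []byte("s := \"😀\"\n")
	p := NewPoint(1, 11, 10) // line 1, byte column 11, byte offset 10
	return ToUTF16Column(p, content)
}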
// FromUTF16Column advances the point by the utf16 character offset given the
// supplied line contents.
// This is used to convert from the utf16 counts used by some editors to the
// native (always in bytes) column representation.
func FromUTF16Column(p Point, chr int, content []byte) (Point, error) {
if !p.HasOffset() {
return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset")
}
// if chr is 1 then no adjustment needed
if chr <= 1 {
return p, nil
}
if p.Offset() >= len(content) {
return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content))
}
remains := content[p.Offset():]
// scan forward the specified number of characters
for count := 1; count < chr; count++ {
if len(remains) <= 0 {
return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content")
}
r, w := utf8.DecodeRune(remains)
if r == '\n' {
// Per the LSP spec:
//
// > If the character value is greater than the line length it
// > defaults back to the line length.
break
}
remains = remains[w:]
if r >= 0x10000 {
// a rune outside the BMP occupies two UTF-16 code units
count++
// if we finished inside such a rune, do not advance past its first unit
if count >= chr {
break
}
}
p.v.Column += w
p.v.Offset += w
}
return p, nil
}
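// exampleFromUTF16Column is a minimal usage sketch, not part of the upstream
// package, assuming the package's NewPoint constructor: advancing from the
// start of the line to UTF-16 column 9 lands on the closing quote at byte
// column 11 / byte offset 10, the inverse of the example above.
func exampleFromUTF16Column() (Point, error) {
	content := []byte("s := \"😀\"\n")
	start := NewPoint(1, 1, 0) // beginning of line 1
	return FromUTF16Column(start, 9, content)
}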