Add generated file

This PR adds generated files under pkg/client and the vendor folder.
This commit is contained in:
xing-yang
2018-07-12 10:55:15 -07:00
parent 36b1de0341
commit e213d1890d
17729 changed files with 5090889 additions and 0 deletions

33
vendor/golang.org/x/tools/go/pointer/TODO generated vendored Normal file
View File

@@ -0,0 +1,33 @@
-*- text -*-
Pointer analysis to-do list
===========================
CONSTRAINT GENERATION:
- support reflection:
- a couple of operators are missing
- reflect.Values may contain lvalues (CanAddr)
- implement native intrinsics. These vary by platform.
- add to pts(a.panic) a label representing all runtime panics, e.g.
runtime.{TypeAssertionError,errorString,errorCString}.
OPTIMISATIONS
- pre-solver:
pointer equivalence: extend HVN to HRU
location equivalence
- solver: HCD, LCD.
- experiment with map+slice worklist in lieu of bitset.
It may have faster insert.
MISC:
- Test on all platforms.
Currently we assume these go/build tags: linux, amd64, !cgo.
MAINTAINABILITY
- Think about ways to make debugging this code easier. PTA logs
routinely exceed a million lines and require training to read.
BUGS:
- There's a crash bug in stdlib_test + reflection, rVCallConstraint.

452
vendor/golang.org/x/tools/go/pointer/analysis.go generated vendored Normal file
View File

@@ -0,0 +1,452 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
// This file defines the main datatypes and Analyze function of the pointer analysis.
import (
"fmt"
"go/token"
"go/types"
"io"
"os"
"reflect"
"runtime"
"runtime/debug"
"sort"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/types/typeutil"
)
// Tunable solver options. These are compile-time constants so that
// code guarded by a disabled flag is eliminated by the compiler.
const (
	// optimization options; enable all when committing
	optRenumber = true // enable renumbering optimization (makes logs hard to read)
	optHVN      = true // enable pointer equivalence via Hash-Value Numbering

	// debugging options; disable all when committing
	debugHVN           = false // enable assertions in HVN
	debugHVNVerbose    = false // enable extra HVN logging
	debugHVNCrossCheck = false // run solver with/without HVN and compare (caveats below)
	debugTimers        = false // show running time of each phase
)

// object.flags bitmask values.
const (
	otTagged   = 1 << iota // type-tagged object
	otIndirect             // type-tagged object with indirect payload
	otFunction             // function object
)
// An object represents a contiguous block of memory to which some
// (generalized) pointer may point.
//
// (Note: most variables called 'obj' are not *objects but nodeids
// such that a.nodes[obj].obj != nil.)
//
type object struct {
	// flags is a bitset of the node type (ot*) flags defined above.
	flags uint32

	// Number of following nodes belonging to the same "object"
	// allocation. Zero for all other nodes.
	size uint32

	// data describes this object; it has one of these types:
	//
	// ssa.Value	for an object allocated by an SSA operation.
	// types.Type	for an rtype instance object or *rtype-tagged object.
	// string	for an intrinsic object, e.g. the array behind os.Args.
	// nil		for an object allocated by an intrinsic.
	//		(cgn provides the identity of the intrinsic.)
	data interface{}

	// The call-graph node (=context) in which this object was allocated.
	// May be nil for global objects: Global, Const, some Functions.
	cgn *cgnode
}
// nodeid denotes a node.
// It is an index within analysis.nodes.
// We use small integers, not *node pointers, for many reasons:
// - they are smaller on 64-bit systems.
// - sets of them can be represented compactly in bitvectors or BDDs.
// - order matters; a field offset can be computed by simple addition.
//
// NOTE(review): nodeid 0 appears to act as an "absent" sentinel
// elsewhere (see Pointer.PointsTo) — confirm before relying on it.
type nodeid uint32
// A node is an equivalence class of memory locations.
// Nodes may be pointers, pointed-to locations, neither, or both.
//
// Nodes that are pointed-to locations ("labels") have an enclosing
// object (see analysis.enclosingObject).
//
type node struct {
	// If non-nil, this node is the start of an object
	// (addressable memory location).
	// The following obj.size nodes implicitly belong to the object;
	// they locate their object by scanning back.
	obj *object

	// The type of the field denoted by this node. Non-aggregate,
	// unless this is a tagged.T node (i.e. the thing
	// pointed to by an interface) in which case typ is that type.
	typ types.Type

	// subelement indicates which directly embedded subelement of
	// an object of aggregate type (struct, tuple, array) this is.
	subelement *fieldInfo // e.g. ".a.b[*].c"

	// Solver state for the canonical node of this pointer-
	// equivalence class. Each node is created with its own state
	// but they become shared after HVN.
	solve *solverState
}
// An analysis instance holds the state of a single pointer analysis problem.
// (Instances are created and driven solely by Analyze.)
type analysis struct {
	config      *Config                     // the client's control/observer interface
	prog        *ssa.Program                // the program being analyzed
	log         io.Writer                   // log stream; nil to disable
	panicNode   nodeid                      // sink for panic, source for recover
	nodes       []*node                     // indexed by nodeid
	flattenMemo map[types.Type][]*fieldInfo // memoization of flatten()
	trackTypes  map[types.Type]bool         // memoization of shouldTrack()
	constraints []constraint                // set of constraints
	cgnodes     []*cgnode                   // all cgnodes
	genq        []*cgnode                   // queue of functions to generate constraints for
	intrinsics  map[*ssa.Function]intrinsic // non-nil values are summaries for intrinsic fns
	globalval   map[ssa.Value]nodeid        // node for each global ssa.Value
	globalobj   map[ssa.Value]nodeid        // maps v to sole member of pts(v), if singleton
	localval    map[ssa.Value]nodeid        // node for each local ssa.Value
	localobj    map[ssa.Value]nodeid        // maps v to sole member of pts(v), if singleton
	atFuncs     map[*ssa.Function]bool      // address-taken functions (for presolver)
	mapValues   []nodeid                    // values of makemap objects (indirect in HVN)
	work        nodeset                     // solver's worklist
	result      *Result                     // results of the analysis
	track       track                       // pointerlike types whose aliasing we track
	deltaSpace  []int                       // working space for iterating over PTS deltas

	// Reflection & intrinsics:
	hasher              typeutil.Hasher // cache of type hashes
	reflectValueObj     types.Object    // type symbol for reflect.Value (if present)
	reflectValueCall    *ssa.Function   // (reflect.Value).Call
	reflectRtypeObj     types.Object    // *types.TypeName for reflect.rtype (if present)
	reflectRtypePtr     *types.Pointer  // *reflect.rtype
	reflectType         *types.Named    // reflect.Type
	rtypes              typeutil.Map    // nodeid of canonical *rtype-tagged object for type T
	reflectZeros        typeutil.Map    // nodeid of canonical T-tagged object for zero value
	runtimeSetFinalizer *ssa.Function   // runtime.SetFinalizer
}
// enclosingObj returns the first node of the addressable memory
// object that encloses node id. Panic ensues if that node does not
// belong to any object.
func (a *analysis) enclosingObj(id nodeid) nodeid {
	// Find previous node with obj != nil.
	//
	// NB: nodeid is unsigned, so the natural loop condition "i >= 0"
	// would be vacuously true and i-- would wrap around at zero,
	// bypassing the panic below with an index-out-of-range instead.
	// We therefore test for zero explicitly at the bottom of the loop.
	for i := id; ; i-- {
		n := a.nodes[i]
		if obj := n.obj; obj != nil {
			if i+nodeid(obj.size) <= id {
				break // out of bounds
			}
			return i
		}
		if i == 0 {
			break
		}
	}
	panic("node has no enclosing object")
}
// labelFor returns the Label for node id.
// Panic ensues if that node is not addressable.
func (a *analysis) labelFor(id nodeid) *Label {
	enclosing := a.enclosingObj(id)
	l := &Label{
		obj:        a.nodes[enclosing].obj,
		subelement: a.nodes[id].subelement,
	}
	return l
}
// warnf records a formatted unsoundness warning at position pos in
// a.result.Warnings, echoing it to the log stream when enabled.
func (a *analysis) warnf(pos token.Pos, format string, args ...interface{}) {
	message := fmt.Sprintf(format, args...)
	if w := a.log; w != nil {
		fmt.Fprintf(w, "%s: warning: %s\n", a.prog.Fset.Position(pos), message)
	}
	a.result.Warnings = append(a.result.Warnings, Warning{pos, message})
}
// computeTrackBits sets a.track to the necessary 'track' bits for the pointer queries.
func (a *analysis) computeTrackBits() {
	if len(a.config.extendedQueries) != 0 {
		// TODO(dh): only track the types necessary for the query.
		a.track = trackAll
		return
	}

	// Collect the (dereferenced) types of every queried value.
	queried := make([]types.Type, 0, len(a.config.Queries)+len(a.config.IndirectQueries))
	for v := range a.config.Queries {
		queried = append(queried, v.Type())
	}
	for v := range a.config.IndirectQueries {
		queried = append(queried, mustDeref(v.Type()))
	}

	for _, qt := range queried {
		switch qt.Underlying().(type) {
		case *types.Chan:
			a.track |= trackChan
		case *types.Map:
			a.track |= trackMap
		case *types.Pointer:
			a.track |= trackPtr
		case *types.Slice:
			a.track |= trackSlice
		case *types.Interface:
			// An interface may hold anything pointer-like.
			a.track = trackAll
			return
		}
		if rv := a.reflectValueObj; rv != nil && types.Identical(qt, rv.Type()) {
			// reflect.Value may hold anything pointer-like.
			a.track = trackAll
			return
		}
	}
}
// Analyze runs the pointer analysis with the scope and options
// specified by config, and returns the (synthetic) root of the callgraph.
//
// Pointer analysis of a transitively closed well-typed program should
// always succeed. An error can occur only due to an internal bug.
//
func Analyze(config *Config) (result *Result, err error) {
	if config.Mains == nil {
		return nil, fmt.Errorf("no main/test packages to analyze (check $GOROOT/$GOPATH)")
	}
	// Convert any internal panic into an error result rather than
	// crashing the client; such a panic indicates a bug in the analysis.
	defer func() {
		if p := recover(); p != nil {
			err = fmt.Errorf("internal error in pointer analysis: %v (please report this bug)", p)
			fmt.Fprintln(os.Stderr, "Internal panic in pointer analysis:")
			debug.PrintStack()
		}
	}()

	a := &analysis{
		config:      config,
		log:         config.Log,
		prog:        config.prog(),
		globalval:   make(map[ssa.Value]nodeid),
		globalobj:   make(map[ssa.Value]nodeid),
		flattenMemo: make(map[types.Type][]*fieldInfo),
		trackTypes:  make(map[types.Type]bool),
		atFuncs:     make(map[*ssa.Function]bool),
		hasher:      typeutil.MakeHasher(),
		intrinsics:  make(map[*ssa.Function]intrinsic),
		result: &Result{
			Queries:         make(map[ssa.Value]Pointer),
			IndirectQueries: make(map[ssa.Value]Pointer),
		},
		deltaSpace: make([]int, 0, 100),
	}

	if false {
		a.log = os.Stderr // for debugging crashes; extremely verbose
	}

	if a.log != nil {
		fmt.Fprintln(a.log, "==== Starting analysis")
	}

	// Pointer analysis requires a complete program for soundness.
	// Check to prevent accidental misconfiguration.
	for _, pkg := range a.prog.AllPackages() {
		// (This only checks that the package scope is complete,
		// not that func bodies exist, but it's a good signal.)
		if !pkg.Pkg.Complete() {
			return nil, fmt.Errorf(`pointer analysis requires a complete program yet package %q was incomplete`, pkg.Pkg.Path())
		}
	}

	// Cache type symbols needed to model reflection, if imported.
	if reflect := a.prog.ImportedPackage("reflect"); reflect != nil {
		rV := reflect.Pkg.Scope().Lookup("Value")
		a.reflectValueObj = rV
		a.reflectValueCall = a.prog.LookupMethod(rV.Type(), nil, "Call")
		a.reflectType = reflect.Pkg.Scope().Lookup("Type").Type().(*types.Named)
		a.reflectRtypeObj = reflect.Pkg.Scope().Lookup("rtype")
		a.reflectRtypePtr = types.NewPointer(a.reflectRtypeObj.Type())

		// Override flattening of reflect.Value, treating it like a basic type.
		tReflectValue := a.reflectValueObj.Type()
		a.flattenMemo[tReflectValue] = []*fieldInfo{{typ: tReflectValue}}

		// Override shouldTrack of reflect.Value and *reflect.rtype.
		// Always track pointers of these types.
		a.trackTypes[tReflectValue] = true
		a.trackTypes[a.reflectRtypePtr] = true

		a.rtypes.SetHasher(a.hasher)
		a.reflectZeros.SetHasher(a.hasher)
	}
	if runtime := a.prog.ImportedPackage("runtime"); runtime != nil {
		a.runtimeSetFinalizer = runtime.Func("SetFinalizer")
	}
	a.computeTrackBits()

	// Generate the constraint system, then (optionally) optimize it
	// before handing it to the solver.
	a.generate()
	a.showCounts()

	if optRenumber {
		a.renumber()
	}

	N := len(a.nodes) // excludes solver-created nodes

	if optHVN {
		if debugHVNCrossCheck {
			// Cross-check: run the solver once without
			// optimization, once with, and compare the
			// solutions.
			savedConstraints := a.constraints

			a.solve()
			a.dumpSolution("A.pts", N)

			// Restore.
			a.constraints = savedConstraints
			for _, n := range a.nodes {
				n.solve = new(solverState)
			}
			a.nodes = a.nodes[:N]

			// rtypes is effectively part of the solver state.
			a.rtypes = typeutil.Map{}
			a.rtypes.SetHasher(a.hasher)
		}

		a.hvn()
	}

	if debugHVNCrossCheck {
		runtime.GC()
		runtime.GC()
	}

	a.solve()

	// Compare solutions.
	if optHVN && debugHVNCrossCheck {
		a.dumpSolution("B.pts", N)

		if !diff("A.pts", "B.pts") {
			return nil, fmt.Errorf("internal error: optimization changed solution")
		}
	}

	// Create callgraph.Nodes in deterministic order.
	if cg := a.result.CallGraph; cg != nil {
		for _, caller := range a.cgnodes {
			cg.CreateNode(caller.fn)
		}
	}

	// Add dynamic edges to call graph.
	var space [100]int
	for _, caller := range a.cgnodes {
		for _, site := range caller.sites {
			for _, callee := range a.nodes[site.targets].solve.pts.AppendTo(space[:0]) {
				a.callEdge(caller, site, nodeid(callee))
			}
		}
	}

	return a.result, nil
}
// callEdge is called for each edge in the callgraph.
// calleeid is the callee's object node (has otFunction flag).
//
func (a *analysis) callEdge(caller *cgnode, site *callsite, calleeid nodeid) {
	obj := a.nodes[calleeid].obj
	if obj.flags&otFunction == 0 {
		panic(fmt.Sprintf("callEdge %s -> n%d: not a function object", site, calleeid))
	}
	// The function object's cgn gives the callee's contour.
	callee := obj.cgn

	if cg := a.result.CallGraph; cg != nil {
		// TODO(adonovan): opt: I would expect duplicate edges
		// (to wrappers) to arise due to the elimination of
		// context information, but I haven't observed any.
		// Understand this better.
		callgraph.AddEdge(cg.CreateNode(caller.fn), site.instr, cg.CreateNode(callee.fn))
	}

	if a.log != nil {
		fmt.Fprintf(a.log, "\tcall edge %s -> %s\n", site, callee)
	}

	// Warn about calls to non-intrinsic external functions.
	// TODO(adonovan): de-dup these messages.
	if fn := callee.fn; fn.Blocks == nil && a.findIntrinsic(fn) == nil {
		a.warnf(site.pos(), "unsound call to unknown intrinsic: %s", fn)
		a.warnf(fn.Pos(), " (declared here)")
	}
}
// dumpSolution writes the PTS solution to the specified file.
//
// It only dumps the nodes that existed before solving. The order in
// which solver-created nodes are created depends on pre-solver
// optimization, so we can't include them in the cross-check.
//
func (a *analysis) dumpSolution(filename string, N int) {
	f, err := os.Create(filename)
	if err != nil {
		panic(err)
	}
	for id, n := range a.nodes[:N] {
		if _, err := fmt.Fprintf(f, "pts(n%d) = {", id); err != nil {
			panic(err)
		}
		var sep string
		for _, l := range n.solve.pts.AppendTo(a.deltaSpace) {
			if l >= N {
				break // omit solver-created nodes (see doc comment)
			}
			fmt.Fprintf(f, "%s%d", sep, l)
			sep = " "
		}
		fmt.Fprintf(f, "} : %s\n", n.typ)
	}
	if err := f.Close(); err != nil {
		panic(err)
	}
}
// showCounts logs the size of the constraint system. A typical
// optimized distribution is 65% copy, 13% load, 11% addr, 5%
// offsetAddr, 4% store, 2% others.
//
// It is a no-op unless logging is enabled.
func (a *analysis) showCounts() {
	if a.log != nil {
		// Tally constraints by dynamic (reflect) type.
		counts := make(map[reflect.Type]int)
		for _, c := range a.constraints {
			counts[reflect.TypeOf(c)]++
		}
		fmt.Fprintf(a.log, "# constraints:\t%d\n", len(a.constraints))
		// Sort lines descending for a stable, most-frequent-first report.
		var lines []string
		for t, n := range counts {
			line := fmt.Sprintf("%7d (%2d%%)\t%s", n, 100*n/len(a.constraints), t)
			lines = append(lines, line)
		}
		sort.Sort(sort.Reverse(sort.StringSlice(lines)))
		for _, line := range lines {
			fmt.Fprintf(a.log, "\t%s\n", line)
		}

		fmt.Fprintf(a.log, "# nodes:\t%d\n", len(a.nodes))

		// Show number of pointer equivalence classes.
		m := make(map[*solverState]bool)
		for _, n := range a.nodes {
			m[n.solve] = true
		}
		fmt.Fprintf(a.log, "# ptsets:\t%d\n", len(m))
	}
}

285
vendor/golang.org/x/tools/go/pointer/api.go generated vendored Normal file
View File

@@ -0,0 +1,285 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
import (
"bytes"
"fmt"
"go/token"
"io"
"golang.org/x/tools/container/intsets"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/types/typeutil"
)
// A Config formulates a pointer analysis problem for Analyze. It is
// only usable for a single invocation of Analyze and must not be
// reused.
type Config struct {
	// Mains contains the set of 'main' packages to analyze.
	// Clients must provide the analysis with at least one
	// package defining a main() function.
	//
	// Non-main packages in the ssa.Program that are not
	// dependencies of any main package may still affect the
	// analysis result, because they contribute runtime types and
	// thus methods.
	// TODO(adonovan): investigate whether this is desirable.
	Mains []*ssa.Package

	// Reflection determines whether to handle reflection
	// operators soundly, which is currently rather slow since it
	// causes constraints to be generated during solving
	// proportional to the number of constraint variables, which
	// has not yet been reduced by presolver optimisation.
	Reflection bool

	// BuildCallGraph determines whether to construct a callgraph.
	// If enabled, the graph will be available in Result.CallGraph.
	BuildCallGraph bool

	// The client populates Queries[v] or IndirectQueries[v]
	// for each ssa.Value v of interest, to request that the
	// points-to sets pts(v) or pts(*v) be computed. If the
	// client needs both points-to sets, v may appear in both
	// maps.
	//
	// (IndirectQueries is typically used for Values corresponding
	// to source-level lvalues, e.g. an *ssa.Global.)
	//
	// The analysis populates the corresponding
	// Result.{Indirect,}Queries map when it creates the pointer
	// variable for v or *v. Upon completion the client can
	// inspect that map for the results.
	//
	// TODO(adonovan): this API doesn't scale well for batch tools
	// that want to dump the entire solution. Perhaps optionally
	// populate a map[*ssa.DebugRef]Pointer in the Result, one
	// entry per source expression.
	//
	Queries         map[ssa.Value]struct{}
	IndirectQueries map[ssa.Value]struct{}
	extendedQueries map[ssa.Value][]*extendedQuery

	// If Log is non-nil, log messages are written to it.
	// Logging is extremely verbose.
	Log io.Writer
}
// track is a bitset recording which kinds of pointer-like references
// the analysis must model; see computeTrackBits.
type track uint32

const (
	trackChan  track = 1 << iota // track 'chan' references
	trackMap                     // track 'map' references
	trackPtr                     // track regular pointers
	trackSlice                   // track slice references

	trackAll = ^track(0) // track everything
)
// AddQuery adds v to Config.Queries.
// Precondition: CanPoint(v.Type()).
func (c *Config) AddQuery(v ssa.Value) {
	t := v.Type()
	if !CanPoint(t) {
		panic(fmt.Sprintf("%s is not a pointer-like value: %s", v, t))
	}
	q := c.Queries
	if q == nil {
		q = make(map[ssa.Value]struct{})
		c.Queries = q
	}
	q[v] = struct{}{}
}
// AddIndirectQuery adds v to Config.IndirectQueries.
// Precondition: CanPoint(v.Type().Underlying().(*types.Pointer).Elem()).
func (c *Config) AddIndirectQuery(v ssa.Value) {
	// Validate before mutating c, mirroring AddQuery.
	if !CanPoint(mustDeref(v.Type())) {
		panic(fmt.Sprintf("%s is not the address of a pointer-like value: %s", v, v.Type()))
	}
	if c.IndirectQueries == nil {
		c.IndirectQueries = make(map[ssa.Value]struct{})
	}
	c.IndirectQueries[v] = struct{}{}
}
// AddExtendedQuery adds an extended, AST-based query on v to the
// analysis. The query, which must be a single Go expression, allows
// destructuring the value.
//
// The query must operate on a variable named 'x', which represents
// the value, and result in a pointer-like object. Only a subset of
// Go expressions are permitted in queries, namely channel receives,
// pointer dereferences, field selectors, array/slice/map/tuple
// indexing and grouping with parentheses. The specific indices when
// indexing arrays, slices and maps have no significance. Indices used
// on tuples must be numeric and within bounds.
//
// All field selectors must be explicit, even ones usually elided
// due to promotion of embedded fields.
//
// The query 'x' is identical to using AddQuery. The query '*x' is
// identical to using AddIndirectQuery.
//
// On success, AddExtendedQuery returns a Pointer to the queried
// value. This Pointer will be initialized during analysis. Using it
// before analysis has finished has undefined behavior.
//
// Example:
// 	// given v, which represents a function call to 'fn() (int, []*T)', and
// 	// 'type T struct { F *int }', the following query will access the field F.
// 	c.AddExtendedQuery(v, "x[1][0].F")
func (c *Config) AddExtendedQuery(v ssa.Value, query string) (*Pointer, error) {
	// Parse and validate the query up front so errors surface here,
	// not during analysis.
	ops, _, err := parseExtendedQuery(v.Type().Underlying(), query)
	if err != nil {
		return nil, fmt.Errorf("invalid query %q: %s", query, err)
	}
	if c.extendedQueries == nil {
		c.extendedQueries = make(map[ssa.Value][]*extendedQuery)
	}

	// The Pointer is filled in when the analysis runs.
	ptr := &Pointer{}
	c.extendedQueries[v] = append(c.extendedQueries[v], &extendedQuery{ops: ops, ptr: ptr})
	return ptr, nil
}
// prog returns the program of the (non-empty) set of main packages.
// It panics if the configuration has no main packages.
func (c *Config) prog() *ssa.Program {
	if len(c.Mains) > 0 {
		return c.Mains[0].Prog
	}
	panic("empty scope")
}
// A Warning records a diagnostic about an unsound aspect of the
// analysis at a particular position; see warnf.
type Warning struct {
	Pos     token.Pos
	Message string
}

// A Result contains the results of a pointer analysis.
//
// See Config for how to request the various Result components.
//
type Result struct {
	CallGraph       *callgraph.Graph      // discovered call graph
	Queries         map[ssa.Value]Pointer // pts(v) for each v in Config.Queries.
	IndirectQueries map[ssa.Value]Pointer // pts(*v) for each v in Config.IndirectQueries.
	Warnings        []Warning             // warnings of unsoundness
}
// A Pointer is an equivalence class of pointer-like values.
//
// A Pointer doesn't have a unique type because pointers of distinct
// types may alias the same object.
//
type Pointer struct {
	a *analysis // the analysis that produced this pointer
	n nodeid    // the pointer's node; 0 denotes an absent pointer (see PointsTo)
}

// A PointsToSet is a set of labels (locations or allocations).
type PointsToSet struct {
	a   *analysis // may be nil if pts is nil
	pts *nodeset
}
// String returns the set as a bracketed, comma-separated list of
// its labels, e.g. "[x, new(T)]"; the empty set prints as "[]".
func (s PointsToSet) String() string {
	if s.pts == nil {
		return "[]"
	}
	var b bytes.Buffer
	b.WriteByte('[')
	sep := ""
	var scratch [50]int
	for _, l := range s.pts.AppendTo(scratch[:0]) {
		b.WriteString(sep)
		sep = ", "
		b.WriteString(s.a.labelFor(nodeid(l)).String())
	}
	b.WriteByte(']')
	return b.String()
}
// Labels returns the set of labels that this points-to set
// contains. It returns nil for an empty set.
func (s PointsToSet) Labels() []*Label {
	if s.pts == nil {
		return nil
	}
	var scratch [50]int
	var labels []*Label
	for _, l := range s.pts.AppendTo(scratch[:0]) {
		labels = append(labels, s.a.labelFor(nodeid(l)))
	}
	return labels
}
// If this PointsToSet came from a Pointer of interface kind
// or a reflect.Value, DynamicTypes returns the set of dynamic
// types that it may contain. (For an interface, they will
// always be concrete types.)
//
// The result is a mapping whose keys are the dynamic types to which
// it may point. For each pointer-like key type, the corresponding
// map value is the PointsToSet for pointers of that type.
//
// The result is empty unless CanHaveDynamicTypes(T).
//
func (s PointsToSet) DynamicTypes() *typeutil.Map {
	var tmap typeutil.Map
	tmap.SetHasher(s.a.hasher)
	if s.pts != nil {
		var space [50]int
		for _, x := range s.pts.AppendTo(space[:0]) {
			ifaceObjId := nodeid(x)
			if !s.a.isTaggedObject(ifaceObjId) {
				continue // !CanHaveDynamicTypes(tDyn)
			}
			tDyn, v, indirect := s.a.taggedValue(ifaceObjId)
			if indirect {
				panic("indirect tagged object") // implement later
			}
			// Merge this object's payload into the accumulated
			// points-to set for its dynamic type.
			pts, ok := tmap.At(tDyn).(PointsToSet)
			if !ok {
				pts = PointsToSet{s.a, new(nodeset)}
				tmap.Set(tDyn, pts)
			}
			pts.pts.addAll(&s.a.nodes[v].solve.pts)
		}
	}
	return &tmap
}
// Intersects reports whether this points-to set and the
// argument points-to set contain common members.
func (x PointsToSet) Intersects(y PointsToSet) bool {
	if x.pts == nil {
		return false
	}
	if y.pts == nil {
		return false
	}
	// Θ(|x|+|y|) sparse-set intersection.
	var common intsets.Sparse
	common.Intersection(&x.pts.Sparse, &y.pts.Sparse)
	return !common.IsEmpty()
}
// String returns a brief debugging identifier for the pointer,
// its node number formatted as e.g. "n5". (It does not describe
// the points-to set; use PointsTo for that.)
func (p Pointer) String() string {
	return fmt.Sprintf("n%d", p.n)
}
// PointsTo returns the points-to set of this pointer.
// For the zero Pointer (node 0) it returns the empty PointsToSet.
func (p Pointer) PointsTo() PointsToSet {
	if p.n == 0 {
		var empty PointsToSet
		return empty
	}
	return PointsToSet{a: p.a, pts: &p.a.nodes[p.n].solve.pts}
}
// MayAlias reports whether the receiver pointer may alias
// the argument pointer, i.e. whether their points-to sets intersect.
func (p Pointer) MayAlias(q Pointer) bool {
	return p.PointsTo().Intersects(q.PointsTo())
}

// DynamicTypes returns p.PointsTo().DynamicTypes().
func (p Pointer) DynamicTypes() *typeutil.Map {
	return p.PointsTo().DynamicTypes()
}

61
vendor/golang.org/x/tools/go/pointer/callgraph.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
// This file defines the internal (context-sensitive) call graph.
import (
"fmt"
"go/token"
"golang.org/x/tools/go/ssa"
)
// A cgnode is a node of the internal context-sensitive call graph:
// a function together with the callsite context (contour) in which
// it is analyzed.
type cgnode struct {
	fn         *ssa.Function
	obj        nodeid      // start of this contour's object block
	sites      []*callsite // ordered list of callsites within this function
	callersite *callsite   // where called from, if known; nil for shared contours
}
// contour returns a description of this node's contour.
func (n *cgnode) contour() string {
	switch {
	case n.callersite == nil:
		return "shared contour"
	case n.callersite.instr != nil:
		return fmt.Sprintf("as called from %s", n.callersite.instr.Parent())
	default:
		return fmt.Sprintf("as called from intrinsic (targets=n%d)", n.callersite.targets)
	}
}
// String returns a debugging label combining the contour's object
// node id and the function, e.g. "cg5:main.f".
func (n *cgnode) String() string {
	return fmt.Sprintf("cg%d:%s", n.obj, n.fn)
}
// A callsite represents a single call site within a cgnode;
// it is implicitly context-sensitive.
// callsites never represent calls to built-ins;
// they are handled as intrinsics.
//
// (See analysis.callEdge for how dynamic targets are turned into
// call-graph edges.)
type callsite struct {
	targets nodeid              // pts(·) contains objects for dynamically called functions
	instr   ssa.CallInstruction // the call instruction; nil for synthetic/intrinsic
}
// String describes the call site for logs and error messages.
func (c *callsite) String() string {
	if c.instr == nil {
		return "synthetic function call"
	}
	return c.instr.Common().Description()
}
// pos returns the source position of this callsite, or token.NoPos if implicit.
func (c *callsite) pos() token.Pos {
	if c.instr == nil {
		return token.NoPos
	}
	return c.instr.Pos()
}

149
vendor/golang.org/x/tools/go/pointer/constraint.go generated vendored Normal file
View File

@@ -0,0 +1,149 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
import "go/types"
// A constraint is one rule of the inclusion-based constraint system
// solved by the analysis; each concrete type below is one constraint
// form, relating the points-to sets of its dst/src nodes.
type constraint interface {
	// For a complex constraint, returns the nodeid of the pointer
	// to which it is attached. For addr and copy, returns dst.
	ptr() nodeid

	// renumber replaces each nodeid n in the constraint by mapping[n].
	renumber(mapping []nodeid)

	// presolve is a hook for constraint-specific behaviour during
	// pre-solver optimization. Typical implementations mark as
	// indirect the set of nodes to which the solver will add copy
	// edges or PTS labels.
	presolve(h *hvn)

	// solve is called for complex constraints when the pts for
	// the node to which they are attached has changed.
	solve(a *analysis, delta *nodeset)

	String() string
}

// dst = &src
// pts(dst) ⊇ {src}
// A base constraint used to initialize the solver's pt sets
type addrConstraint struct {
	dst nodeid // (ptr)
	src nodeid
}

func (c *addrConstraint) ptr() nodeid { return c.dst }
func (c *addrConstraint) renumber(mapping []nodeid) {
	c.dst = mapping[c.dst]
	c.src = mapping[c.src]
}

// dst = src
// A simple constraint represented directly as a copyTo graph edge.
type copyConstraint struct {
	dst nodeid // (ptr)
	src nodeid
}

func (c *copyConstraint) ptr() nodeid { return c.dst }
func (c *copyConstraint) renumber(mapping []nodeid) {
	c.dst = mapping[c.dst]
	c.src = mapping[c.src]
}

// dst = src[offset]
// A complex constraint attached to src (the pointer)
type loadConstraint struct {
	offset uint32
	dst    nodeid
	src    nodeid // (ptr)
}

func (c *loadConstraint) ptr() nodeid { return c.src }
func (c *loadConstraint) renumber(mapping []nodeid) {
	c.dst = mapping[c.dst]
	c.src = mapping[c.src]
}

// dst[offset] = src
// A complex constraint attached to dst (the pointer)
type storeConstraint struct {
	offset uint32
	dst    nodeid // (ptr)
	src    nodeid
}

func (c *storeConstraint) ptr() nodeid { return c.dst }
func (c *storeConstraint) renumber(mapping []nodeid) {
	c.dst = mapping[c.dst]
	c.src = mapping[c.src]
}

// dst = &src.f or dst = &src[0]
// A complex constraint attached to dst (the pointer)
type offsetAddrConstraint struct {
	offset uint32
	dst    nodeid
	src    nodeid // (ptr)
}

func (c *offsetAddrConstraint) ptr() nodeid { return c.src }
func (c *offsetAddrConstraint) renumber(mapping []nodeid) {
	c.dst = mapping[c.dst]
	c.src = mapping[c.src]
}

// dst = src.(typ) where typ is an interface
// A complex constraint attached to src (the interface).
// No representation change: pts(dst) and pts(src) contains tagged objects.
type typeFilterConstraint struct {
	typ types.Type // an interface type
	dst nodeid
	src nodeid // (ptr)
}

func (c *typeFilterConstraint) ptr() nodeid { return c.src }
func (c *typeFilterConstraint) renumber(mapping []nodeid) {
	c.dst = mapping[c.dst]
	c.src = mapping[c.src]
}

// dst = src.(typ) where typ is a concrete type
// A complex constraint attached to src (the interface).
//
// If exact, only tagged objects identical to typ are untagged.
// If !exact, tagged objects assignable to typ are untagged too.
// The latter is needed for various reflect operators, e.g. Send.
//
// This entails a representation change:
// pts(src) contains tagged objects,
// pts(dst) contains their payloads.
type untagConstraint struct {
	typ   types.Type // a concrete type
	dst   nodeid
	src   nodeid // (ptr)
	exact bool
}

func (c *untagConstraint) ptr() nodeid { return c.src }
func (c *untagConstraint) renumber(mapping []nodeid) {
	c.dst = mapping[c.dst]
	c.src = mapping[c.src]
}

// src.method(params...)
// A complex constraint attached to iface.
type invokeConstraint struct {
	method *types.Func // the abstract method
	iface  nodeid      // (ptr) the interface
	params nodeid      // the start of the identity/params/results block
}

func (c *invokeConstraint) ptr() nodeid { return c.iface }
func (c *invokeConstraint) renumber(mapping []nodeid) {
	c.iface = mapping[c.iface]
	c.params = mapping[c.params]
}

610
vendor/golang.org/x/tools/go/pointer/doc.go generated vendored Normal file
View File

@@ -0,0 +1,610 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package pointer implements Andersen's analysis, an inclusion-based
pointer analysis algorithm first described in (Andersen, 1994).
A pointer analysis relates every pointer expression in a whole program
to the set of memory locations to which it might point. This
information can be used to construct a call graph of the program that
precisely represents the destinations of dynamic function and method
calls. It can also be used to determine, for example, which pairs of
channel operations operate on the same channel.
The package allows the client to request a set of expressions of
interest for which the points-to information will be returned once the
analysis is complete. In addition, the client may request that a
callgraph is constructed. The example program in example_test.go
demonstrates both of these features. Clients should not request more
information than they need since it may increase the cost of the
analysis significantly.
CLASSIFICATION
Our algorithm is INCLUSION-BASED: the points-to sets for x and y will
be related by pts(y) ⊇ pts(x) if the program contains the statement
y = x.
It is FLOW-INSENSITIVE: it ignores all control flow constructs and the
order of statements in a program. It is therefore a "MAY ALIAS"
analysis: its facts are of the form "P may/may not point to L",
not "P must point to L".
It is FIELD-SENSITIVE: it builds separate points-to sets for distinct
fields, such as x and y in struct { x, y *int }.
It is mostly CONTEXT-INSENSITIVE: most functions are analyzed once,
so values can flow in at one call to the function and return out at
another. Only some smaller functions are analyzed with consideration
of their calling context.
It has a CONTEXT-SENSITIVE HEAP: objects are named by both allocation
site and context, so the objects returned by two distinct calls to f:
func f() *T { return new(T) }
are distinguished up to the limits of the calling context.
It is a WHOLE PROGRAM analysis: it requires SSA-form IR for the
complete Go program and summaries for native code.
See the (Hind, PASTE'01) survey paper for an explanation of these terms.
SOUNDNESS
The analysis is fully sound when invoked on pure Go programs that do not
use reflection or unsafe.Pointer conversions. In other words, if there
is any possible execution of the program in which pointer P may point to
object O, the analysis will report that fact.
REFLECTION
By default, the "reflect" library is ignored by the analysis, as if all
its functions were no-ops, but if the client enables the Reflection flag,
the analysis will make a reasonable attempt to model the effects of
calls into this library. However, this comes at a significant
performance cost, and not all features of that library are yet
implemented. In addition, some simplifying approximations must be made
to ensure that the analysis terminates; for example, reflection can be
used to construct an infinite set of types and values of those types,
but the analysis arbitrarily bounds the depth of such types.
Most but not all reflection operations are supported.
In particular, addressable reflect.Values are not yet implemented, so
operations such as (reflect.Value).Set have no analytic effect.
UNSAFE POINTER CONVERSIONS
The pointer analysis makes no attempt to understand aliasing between the
operand x and result y of an unsafe.Pointer conversion:
y = (*T)(unsafe.Pointer(x))
It is as if the conversion allocated an entirely new object:
y = new(T)
NATIVE CODE
The analysis cannot model the aliasing effects of functions written in
languages other than Go, such as runtime intrinsics in C or assembly, or
code accessed via cgo. The result is as if such functions are no-ops.
However, various important intrinsics are understood by the analysis,
along with built-ins such as append.
The analysis currently provides no way for users to specify the aliasing
effects of native code.
------------------------------------------------------------------------
IMPLEMENTATION
The remaining documentation is intended for package maintainers and
pointer analysis specialists. Maintainers should have a solid
understanding of the referenced papers (especially those by H&L and PKH)
before making significant changes.
The implementation is similar to that described in (Pearce et al,
PASTE'04). Unlike many algorithms which interleave constraint
generation and solving, constructing the callgraph as they go, this
implementation for the most part observes a phase ordering (generation
before solving), with only simple (copy) constraints being generated
during solving. (The exception is reflection, which creates various
constraints during solving as new types flow to reflect.Value
operations.) This improves the traction of presolver optimisations,
but imposes certain restrictions, e.g. potential context sensitivity
is limited since all variants must be created a priori.
TERMINOLOGY
A type is said to be "pointer-like" if it is a reference to an object.
Pointer-like types include pointers and also interfaces, maps, channels,
functions and slices.
We occasionally use C's x->f notation to distinguish the case where x
is a struct pointer from x.f where x is a struct value.
Pointer analysis literature (and our comments) often uses the notation
dst=*src+offset to mean something different than what it means in Go.
It means: for each node index p in pts(src), the node index p+offset is
in pts(dst). Similarly *dst+offset=src is used for store constraints
and dst=src+offset for offset-address constraints.
NODES
Nodes are the key datastructure of the analysis, and have a dual role:
they represent both constraint variables (equivalence classes of
pointers) and members of points-to sets (things that can be pointed
at, i.e. "labels").
Nodes are naturally numbered. The numbering enables compact
representations of sets of nodes such as bitvectors (or BDDs); and the
ordering enables a very cheap way to group related nodes together. For
example, passing n parameters consists of generating n parallel
constraints from caller+i to callee+i for 0<=i<n.
The zero nodeid means "not a pointer". For simplicity, we generate flow
constraints even for non-pointer types such as int. The pointer
equivalence (PE) presolver optimization detects which variables cannot
point to anything; this includes not only all variables of non-pointer
types (such as int) but also variables of pointer-like types if they are
always nil, or are parameters to a function that is never called.
Each node represents a scalar part of a value or object.
Aggregate types (structs, tuples, arrays) are recursively flattened
out into a sequential list of scalar component types, and all the
elements of an array are represented by a single node. (The
flattening of a basic type is a list containing a single node.)
Nodes are connected into a graph with various kinds of labelled edges:
simple edges (or copy constraints) represent value flow. Complex
edges (load, store, etc) trigger the creation of new simple edges
during the solving phase.
OBJECTS
Conceptually, an "object" is a contiguous sequence of nodes denoting
an addressable location: something that a pointer can point to. The
first node of an object has a non-nil obj field containing information
about the allocation: its size, context, and ssa.Value.
Objects include:
- functions and globals;
- variable allocations in the stack frame or heap;
- maps, channels and slices created by calls to make();
- allocations to construct an interface;
- allocations caused by conversions, e.g. []byte(str).
- arrays allocated by calls to append();
Many objects have no Go types. For example, the func, map and chan type
kinds in Go are all varieties of pointers, but their respective objects
are actual functions (executable code), maps (hash tables), and channels
(synchronized queues). Given the way we model interfaces, they too are
pointers to "tagged" objects with no Go type. And an *ssa.Global denotes
the address of a global variable, but the object for a Global is the
actual data. So, the type of an ssa.Value that creates an object is
"off by one indirection": a pointer to the object.
The individual nodes of an object are sometimes referred to as "labels".
For uniformity, all objects have a non-zero number of fields, even those
of the empty type struct{}. (All arrays are treated as if of length 1,
so there are no empty arrays. The empty tuple is never address-taken,
so is never an object.)
TAGGED OBJECTS
A tagged object has the following layout:
T -- obj.flags ⊇ {otTagged}
v
...
The T node's typ field is the dynamic type of the "payload": the value
v which follows, flattened out. The T node's obj has the otTagged
flag.
Tagged objects are needed when generalizing across types: interfaces,
reflect.Values, reflect.Types. Each of these three types is modelled
as a pointer that exclusively points to tagged objects.
Tagged objects may be indirect (obj.flags ⊇ {otIndirect}) meaning that
the value v is not of type T but *T; this is used only for
reflect.Values that represent lvalues. (These are not implemented yet.)
ANALYSIS ABSTRACTION OF EACH TYPE
Variables of the following "scalar" types may be represented by a
single node: basic types, pointers, channels, maps, slices, 'func'
pointers, interfaces.
Pointers
Nothing to say here, oddly.
Basic types (bool, string, numbers, unsafe.Pointer)
Currently all fields in the flattening of a type, including
non-pointer basic types such as int, are represented in objects and
values. Though non-pointer nodes within values are uninteresting,
non-pointer nodes in objects may be useful (if address-taken)
because they permit the analysis to deduce, in this example,
var s struct{ ...; x int; ... }
p := &s.x
that p points to s.x. If we ignored such object fields, we could only
say that p points somewhere within s.
All other basic types are ignored. Expressions of these types have
zero nodeid, and fields of these types within aggregate other types
are omitted.
unsafe.Pointers are not modelled as pointers, so a conversion of an
unsafe.Pointer to *T is (unsoundly) treated equivalent to new(T).
Channels
An expression of type 'chan T' is a kind of pointer that points
exclusively to channel objects, i.e. objects created by MakeChan (or
reflection).
'chan T' is treated like *T.
*ssa.MakeChan is treated as equivalent to new(T).
*ssa.Send and receive (*ssa.UnOp(ARROW)) are equivalent to store
and load.
Maps
An expression of type 'map[K]V' is a kind of pointer that points
exclusively to map objects, i.e. objects created by MakeMap (or
reflection).
map[K]V is treated like *M where M = struct{k K; v V}.
*ssa.MakeMap is equivalent to new(M).
*ssa.MapUpdate is equivalent to *y=x where *y and x have type M.
*ssa.Lookup is equivalent to y=x.v where x has type *M.
Slices
A slice []T, which dynamically resembles a struct{array *T, len, cap int},
is treated as if it were just a *T pointer; the len and cap fields are
ignored.
*ssa.MakeSlice is treated like new([1]T): an allocation of a
singleton array.
*ssa.Index on a slice is equivalent to a load.
*ssa.IndexAddr on a slice returns the address of the sole element of the
slice, i.e. the same address.
*ssa.Slice is treated as a simple copy.
Functions
An expression of type 'func...' is a kind of pointer that points
exclusively to function objects.
A function object has the following layout:
identity -- typ:*types.Signature; obj.flags ⊇ {otFunction}
params_0 -- (the receiver, if a method)
...
params_n-1
results_0
...
results_m-1
There may be multiple function objects for the same *ssa.Function
due to context-sensitive treatment of some functions.
The first node is the function's identity node.
Associated with every callsite is a special "targets" variable,
whose pts() contains the identity node of each function to which
the call may dispatch. Identity words are not otherwise used during
the analysis, but we construct the call graph from the pts()
solution for such nodes.
The following block of contiguous nodes represents the flattened-out
types of the parameters ("P-block") and results ("R-block") of the
function object.
The treatment of free variables of closures (*ssa.FreeVar) is like
that of global variables; it is not context-sensitive.
*ssa.MakeClosure instructions create copy edges to Captures.
A Go value of type 'func' (i.e. a pointer to one or more functions)
is a pointer whose pts() contains function objects. The valueNode()
for an *ssa.Function returns a singleton for that function.
Interfaces
An expression of type 'interface{...}' is a kind of pointer that
points exclusively to tagged objects. All tagged objects pointed to
by an interface are direct (the otIndirect flag is clear) and
concrete (the tag type T is not itself an interface type). The
associated ssa.Value for an interface's tagged objects may be an
*ssa.MakeInterface instruction, or nil if the tagged object was
created by an intrinsic (e.g. reflection).
Constructing an interface value causes generation of constraints for
all of the concrete type's methods; we can't tell a priori which
ones may be called.
TypeAssert y = x.(T) is implemented by a dynamic constraint
triggered by each tagged object O added to pts(x): a typeFilter
constraint if T is an interface type, or an untag constraint if T is
a concrete type. A typeFilter tests whether O.typ implements T; if
so, O is added to pts(y). An untagFilter tests whether O.typ is
assignable to T, and if so, a copy edge O.v -> y is added.
ChangeInterface is a simple copy because the representation of
tagged objects is independent of the interface type (in contrast
to the "method tables" approach used by the gc runtime).
y := Invoke x.m(...) is implemented by allocating contiguous P/R
blocks for the callsite and adding a dynamic rule triggered by each
tagged object added to pts(x). The rule adds param/results copy
edges to/from each discovered concrete method.
(Q. Why do we model an interface as a pointer to a pair of type and
value, rather than as a pair of a pointer to type and a pointer to
value?
A. Control-flow joins would merge interfaces ({T1}, {V1}) and ({T2},
{V2}) to make ({T1,T2}, {V1,V2}), leading to the infeasible and
type-unsafe combination (T1,V2). Treating the value and its concrete
type as inseparable makes the analysis type-safe.)
reflect.Value
A reflect.Value is modelled very similarly to an interface{}, i.e. as
a pointer exclusively to tagged objects, but with two generalizations.
1) a reflect.Value that represents an lvalue points to an indirect
(obj.flags ⊇ {otIndirect}) tagged object, which has a similar
layout to a tagged object except that the value is a pointer to
the dynamic type. Indirect tagged objects preserve the correct
aliasing so that mutations made by (reflect.Value).Set can be
observed.
Indirect objects only arise when an lvalue is derived from an
rvalue by indirection, e.g. the following code:
type S struct { X T }
var s S
var i interface{} = &s // i points to a *S-tagged object (from MakeInterface)
v1 := reflect.ValueOf(i) // v1 points to same *S-tagged object as i
v2 := v1.Elem() // v2 points to an indirect S-tagged object, pointing to s
v3 := v2.FieldByName("X") // v3 points to an indirect int-tagged object, pointing to s.X
v3.Set(y) // pts(s.X) ⊇ pts(y)
Whether indirect or not, the concrete type of the tagged object
corresponds to the user-visible dynamic type, and the existence
of a pointer is an implementation detail.
(NB: indirect tagged objects are not yet implemented)
2) The dynamic type tag of a tagged object pointed to by a
reflect.Value may be an interface type; it need not be concrete.
This arises in code such as this:
tEface := reflect.TypeOf(new(interface{})).Elem() // interface{}
eface := reflect.Zero(tEface)
pts(eface) is a singleton containing an interface{}-tagged
object. That tagged object's payload is an interface{} value,
i.e. the pts of the payload contains only concrete-tagged
objects, although in this example it's the zero interface{} value,
so its pts is empty.
reflect.Type
Just as in the real "reflect" library, we represent a reflect.Type
as an interface whose sole implementation is the concrete type,
*reflect.rtype. (This choice is forced on us by go/types: clients
cannot fabricate types with arbitrary method sets.)
rtype instances are canonical: there is at most one per dynamic
type. (rtypes are in fact large structs but since identity is all
that matters, we represent them by a single node.)
The payload of each *rtype-tagged object is an *rtype pointer that
points to exactly one such canonical rtype object. We exploit this
by setting the node.typ of the payload to the dynamic type, not
'*rtype'. This saves us an indirection in each resolution rule. As
an optimisation, *rtype-tagged objects are canonicalized too.
Aggregate types:
Aggregate types are treated as if all directly contained
aggregates are recursively flattened out.
Structs
*ssa.Field y = x.f creates a simple edge to y from x's node at f's offset.
*ssa.FieldAddr y = &x->f requires a dynamic closure rule to create
simple edges for each struct discovered in pts(x).
The nodes of a struct consist of a special 'identity' node (whose
type is that of the struct itself), followed by the nodes for all
the struct's fields, recursively flattened out. A pointer to the
struct is a pointer to its identity node. That node allows us to
distinguish a pointer to a struct from a pointer to its first field.
Field offsets are logical field offsets (plus one for the identity
node), so the sizes of the fields can be ignored by the analysis.
(The identity node is non-traditional but enables the distinction
described above, which is valuable for code comprehension tools.
Typical pointer analyses for C, whose purpose is compiler
optimization, must soundly model unsafe.Pointer (void*) conversions,
and this requires fidelity to the actual memory layout using physical
field offsets.)
*ssa.Field y = x.f creates a simple edge to y from x's node at f's offset.
*ssa.FieldAddr y = &x->f requires a dynamic closure rule to create
simple edges for each struct discovered in pts(x).
Arrays
We model an array by an identity node (whose type is that of the
array itself) followed by a node representing all the elements of
the array; the analysis does not distinguish elements with different
indices. Effectively, an array is treated like struct{elem T}, a
load y=x[i] like y=x.elem, and a store x[i]=y like x.elem=y; the
index i is ignored.
A pointer to an array is pointer to its identity node. (A slice is
also a pointer to an array's identity node.) The identity node
allows us to distinguish a pointer to an array from a pointer to one
of its elements, but it is rather costly because it introduces more
offset constraints into the system. Furthermore, sound treatment of
unsafe.Pointer would require us to dispense with this node.
Arrays may be allocated by Alloc, by make([]T), by calls to append,
and via reflection.
Tuples (T, ...)
Tuples are treated like structs with naturally numbered fields.
*ssa.Extract is analogous to *ssa.Field.
However, tuples have no identity field since by construction, they
cannot be address-taken.
FUNCTION CALLS
There are three kinds of function call:
(1) static "call"-mode calls of functions.
(2) dynamic "call"-mode calls of functions.
(3) dynamic "invoke"-mode calls of interface methods.
Cases 1 and 2 apply equally to methods and standalone functions.
Static calls.
A static call consists of three steps:
- finding the function object of the callee;
- creating copy edges from the actual parameter value nodes to the
P-block in the function object (this includes the receiver if
the callee is a method);
- creating copy edges from the R-block in the function object to
the value nodes for the result of the call.
A static function call is little more than two struct value copies
between the P/R blocks of caller and callee:
callee.P = caller.P
caller.R = callee.R
Context sensitivity
Static calls (alone) may be treated context sensitively,
i.e. each callsite may cause a distinct re-analysis of the
callee, improving precision. Our current context-sensitivity
policy treats all intrinsics and getter/setter methods in this
manner since such functions are small and seem like an obvious
source of spurious confluences, though this has not yet been
evaluated.
Dynamic function calls
Dynamic calls work in a similar manner except that the creation of
copy edges occurs dynamically, in a similar fashion to a pair of
struct copies in which the callee is indirect:
callee->P = caller.P
caller.R = callee->R
(Recall that the function object's P- and R-blocks are contiguous.)
Interface method invocation
For invoke-mode calls, we create a params/results block for the
callsite and attach a dynamic closure rule to the interface. For
each new tagged object that flows to the interface, we look up
the concrete method, find its function object, and connect its P/R
blocks to the callsite's P/R blocks, adding copy edges to the graph
during solving.
Recording call targets
The analysis notifies its clients of each callsite it encounters,
passing a CallSite interface. Among other things, the CallSite
contains a synthetic constraint variable ("targets") whose
points-to solution includes the set of all function objects to
which the call may dispatch.
It is via this mechanism that the callgraph is made available.
Clients may also elect to be notified of callgraph edges directly;
internally this just iterates all "targets" variables' pts(·)s.
PRESOLVER
We implement Hash-Value Numbering (HVN), a pre-solver constraint
optimization described in Hardekopf & Lin, SAS'07. This is documented
in more detail in hvn.go. We intend to add its cousins HR and HU in
future.
SOLVER
The solver is currently a naive Andersen-style implementation; it does
not perform online cycle detection, though we plan to add solver
optimisations such as Hybrid- and Lazy- Cycle Detection from (Hardekopf
& Lin, PLDI'07).
It uses difference propagation (Pearce et al, SQC'04) to avoid
redundant re-triggering of closure rules for values already seen.
Points-to sets are represented using sparse bit vectors (similar to
those used in LLVM and gcc), which are more space- and time-efficient
than sets based on Go's built-in map type or dense bit vectors.
Nodes are permuted prior to solving so that object nodes (which may
appear in points-to sets) are lower numbered than non-object (var)
nodes. This improves the density of the set over which the PTSs
range, and thus the efficiency of the representation.
Partly thanks to avoiding map iteration, the execution of the solver is
100% deterministic, a great help during debugging.
FURTHER READING
Andersen, L. O. 1994. Program analysis and specialization for the C
programming language. Ph.D. dissertation. DIKU, University of
Copenhagen.
David J. Pearce, Paul H. J. Kelly, and Chris Hankin. 2004. Efficient
field-sensitive pointer analysis for C. In Proceedings of the 5th ACM
SIGPLAN-SIGSOFT workshop on Program analysis for software tools and
engineering (PASTE '04). ACM, New York, NY, USA, 37-42.
http://doi.acm.org/10.1145/996821.996835
David J. Pearce, Paul H. J. Kelly, and Chris Hankin. 2004. Online
Cycle Detection and Difference Propagation: Applications to Pointer
Analysis. Software Quality Control 12, 4 (December 2004), 311-337.
http://dx.doi.org/10.1023/B:SQJO.0000039791.93071.a2
David Grove and Craig Chambers. 2001. A framework for call graph
construction algorithms. ACM Trans. Program. Lang. Syst. 23, 6
(November 2001), 685-746.
http://doi.acm.org/10.1145/506315.506316
Ben Hardekopf and Calvin Lin. 2007. The ant and the grasshopper: fast
and accurate pointer analysis for millions of lines of code. In
Proceedings of the 2007 ACM SIGPLAN conference on Programming language
design and implementation (PLDI '07). ACM, New York, NY, USA, 290-299.
http://doi.acm.org/10.1145/1250734.1250767
Ben Hardekopf and Calvin Lin. 2007. Exploiting pointer and location
equivalence to optimize pointer analysis. In Proceedings of the 14th
international conference on Static Analysis (SAS'07), Hanne Riis
Nielson and Gilberto Filé (Eds.). Springer-Verlag, Berlin, Heidelberg,
265-280.
Atanas Rountev and Satish Chandra. 2000. Off-line variable substitution
for scaling points-to analysis. In Proceedings of the ACM SIGPLAN 2000
conference on Programming language design and implementation (PLDI '00).
ACM, New York, NY, USA, 47-56. DOI=10.1145/349299.349310
http://doi.acm.org/10.1145/349299.349310
*/
package pointer // import "golang.org/x/tools/go/pointer"

126
vendor/golang.org/x/tools/go/pointer/example_test.go generated vendored Normal file
View File

@@ -0,0 +1,126 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer_test
import (
"fmt"
"sort"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/pointer"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
)
// This program demonstrates how to use the pointer analysis to
// obtain a conservative call-graph of a Go program.
// It also shows how to compute the points-to set of a variable,
// in this case, (C).f's m parameter (a map).
//
func Example() {
	// A self-contained input program: main makes a dynamic
	// interface-method call i.f(x), which the analysis must resolve
	// to the concrete method (C).f.
	const myprog = `
package main

import "fmt"

type I interface {
	f(map[string]int)
}

type C struct{}

func (C) f(m map[string]int) {
	fmt.Println("C.f()")
}

func main() {
	var i I = C{}
	x := map[string]int{"one":1}
	i.f(x) // dynamic method call
}
`
	var conf loader.Config

	// Parse the input file, a string.
	// (Command-line tools should use conf.FromArgs.)
	file, err := conf.ParseFile("myprog.go", myprog)
	if err != nil {
		fmt.Print(err) // parse error
		return
	}

	// Create single-file main package and import its dependencies.
	conf.CreateFromFiles("main", file)

	iprog, err := conf.Load()
	if err != nil {
		fmt.Print(err) // type error in some package
		return
	}

	// Create SSA-form program representation.
	prog := ssautil.CreateProgram(iprog, 0)
	mainPkg := prog.Package(iprog.Created[0].Pkg)

	// Build SSA code for bodies of all functions in the whole program.
	// (The pointer analysis is whole-program; see doc.go.)
	prog.Build()

	// Configure the pointer analysis to build a call-graph.
	config := &pointer.Config{
		Mains:          []*ssa.Package{mainPkg},
		BuildCallGraph: true,
	}

	// Query points-to set of (C).f's parameter m, a map.
	// Params[0] is the method's receiver, so the map parameter m is
	// Params[1].
	C := mainPkg.Type("C").Type()
	Cfm := prog.LookupMethod(C, mainPkg.Pkg, "f").Params[1]
	config.AddQuery(Cfm)

	// Run the pointer analysis.
	result, err := pointer.Analyze(config)
	if err != nil {
		panic(err) // internal error in pointer analysis
	}

	// Find edges originating from the main package.
	// By converting to strings, we de-duplicate nodes
	// representing the same function due to context sensitivity.
	var edges []string
	callgraph.GraphVisitEdges(result.CallGraph, func(edge *callgraph.Edge) error {
		caller := edge.Caller.Func
		if caller.Pkg == mainPkg {
			edges = append(edges, fmt.Sprint(caller, " --> ", edge.Callee.Func))
		}
		return nil
	})

	// Print the edges in sorted order.
	sort.Strings(edges)
	for _, edge := range edges {
		fmt.Println(edge)
	}
	fmt.Println()

	// Print the labels of (C).f(m)'s points-to set.
	// Sorting gives deterministic output for the Output check below.
	fmt.Println("m may point to:")
	var labels []string
	for _, l := range result.Queries[Cfm].PointsTo().Labels() {
		label := fmt.Sprintf("  %s: %s", prog.Fset.Position(l.Pos()), l)
		labels = append(labels, label)
	}
	sort.Strings(labels)
	for _, label := range labels {
		fmt.Println(label)
	}

	// Output:
	// (main.C).f --> fmt.Println
	// main.init --> fmt.init
	// main.main --> (main.C).f
	//
	// m may point to:
	//   myprog.go:18:21: makemap
}

1325
vendor/golang.org/x/tools/go/pointer/gen.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

973
vendor/golang.org/x/tools/go/pointer/hvn.go generated vendored Normal file
View File

@@ -0,0 +1,973 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
// This file implements Hash-Value Numbering (HVN), a pre-solver
// constraint optimization described in Hardekopf & Lin, SAS'07 (see
// doc.go) that analyses the graph topology to determine which sets of
// variables are "pointer equivalent" (PE), i.e. must have identical
// points-to sets in the solution.
//
// A separate ("offline") graph is constructed. Its nodes are those of
// the main-graph, plus an additional node *X for each pointer node X.
// With this graph we can reason about the unknown points-to set of
// dereferenced pointers. (We do not generalize this to represent
// unknown fields x->f, perhaps because such fields would be numerous,
// though it might be worth an experiment.)
//
// Nodes whose points-to relations are not entirely captured by the
// graph are marked as "indirect": the *X nodes, the parameters of
// address-taken functions (which includes all functions in method
// sets), or nodes updated by the solver rules for reflection, etc.
//
// All addr (y=&x) nodes are initially assigned a pointer-equivalence
// (PE) label equal to x's nodeid in the main graph. (These are the
// only PE labels that are less than len(a.nodes).)
//
// All offsetAddr (y=&x.f) constraints are initially assigned a PE
// label; such labels are memoized, keyed by (x, f), so that equivalent
// nodes y are assigned the same label.
//
// Then we process each strongly connected component (SCC) of the graph
// in topological order, assigning it a PE label based on the set P of
// PE labels that flow to it from its immediate dependencies.
//
// If any node in P is "indirect", the entire SCC is assigned a fresh PE
// label. Otherwise:
//
// |P|=0 if P is empty, all nodes in the SCC are non-pointers (e.g.
// uninitialized variables, or formal params of dead functions)
// and the SCC is assigned the PE label of zero.
//
// |P|=1 if P is a singleton, the SCC is assigned the same label as the
// sole element of P.
//
// |P|>1 if P contains multiple labels, a unique label representing P is
// invented and recorded in a hash table, so that other
// equivalent SCCs may also be assigned this label, akin to
// conventional hash-value numbering in a compiler.
//
// Finally, a renumbering is computed such that each node is replaced by
// the lowest-numbered node with the same PE label. All constraints are
// renumbered, and any resulting duplicates are eliminated.
//
// The only nodes that are not renumbered are the objects x in addr
// (y=&x) constraints, since the ids of these nodes (and fields derived
// from them via offsetAddr rules) are the elements of all points-to
// sets, so they must remain as they are if we want the same solution.
//
// The solverStates (node.solve) for nodes in the same equivalence class
// are linked together so that all nodes in the class have the same
// solution. This avoids the need to renumber nodeids buried in
// Queries, cgnodes, etc (like (*analysis).renumber() does) since only
// the solution is needed.
//
// The result of HVN is that the number of distinct nodes and
// constraints is reduced, but the solution is identical (almost---see
// CROSS-CHECK below). In particular, both linear and cyclic chains of
// copies are each replaced by a single node.
//
// Nodes and constraints created "online" (e.g. while solving reflection
// constraints) are not subject to this optimization.
//
// PERFORMANCE
//
// In two benchmarks (guru and godoc), HVN eliminates about two thirds
// of nodes, the majority accounted for by non-pointers: nodes of
// non-pointer type, pointers that remain nil, formal parameters of dead
// functions, nodes of untracked types, etc. It also reduces the number
// of constraints, also by about two thirds, and the solving time by
// 30--42%, although we must pay about 15% for the running time of HVN
// itself. The benefit is greater for larger applications.
//
// There are many possible optimizations to improve the performance:
// * Use fewer than 1:1 onodes to main graph nodes: many of the onodes
// we create are not needed.
// * HU (HVN with Union---see paper): coalesce "union" peLabels when
// their expanded-out sets are equal.
// * HR (HVN with deReference---see paper): this will require that we
// apply HVN until fixed point, which may need more bookkeeping of the
// correspondence of main nodes to onodes.
// * Location Equivalence (see paper): have points-to sets contain not
// locations but location-equivalence class labels, each representing
// a set of locations.
// * HVN with field-sensitive ref: model each of the fields of a
// pointer-to-struct.
//
// CROSS-CHECK
//
// To verify the soundness of the optimization, when the
// debugHVNCrossCheck option is enabled, we run the solver twice, once
// before and once after running HVN, dumping the solution to disk, and
// then we compare the results. If they are not identical, the analysis
// panics.
//
// The solution dumped to disk includes only the N*N submatrix of the
// complete solution where N is the number of nodes after generation.
// In other words, we ignore pointer variables and objects created by
// the solver itself, since their numbering depends on the solver order,
// which is affected by the optimization. In any case, that's the only
// part the client cares about.
//
// The cross-check is too strict and may fail spuriously. Although the
// H&L paper describing HVN states that the solutions obtained should be
// identical, this is not the case in practice because HVN can collapse
// cycles involving *p even when pts(p)={}. Consider this example
// distilled from testdata/hello.go:
//
// var x T
// func f(p **T) {
// t0 = *p
// ...
// t1 = φ(t0, &x)
// *p = t1
// }
//
// If f is dead code, we get:
// unoptimized: pts(p)={} pts(t0)={} pts(t1)={&x}
// optimized: pts(p)={} pts(t0)=pts(t1)=pts(*p)={&x}
//
// It's hard to argue that this is a bug: the result is sound and the
// loss of precision is inconsequential---f is dead code, after all.
// But unfortunately it limits the usefulness of the cross-check since
// failures must be carefully analyzed. Ben Hardekopf suggests (in
// personal correspondence) some approaches to mitigating it:
//
// If there is a node with an HVN points-to set that is a superset
// of the NORM points-to set, then either it's a bug or it's a
// result of this issue. If it's a result of this issue, then in
// the offline constraint graph there should be a REF node inside
// some cycle that reaches this node, and in the NORM solution the
// pointer being dereferenced by that REF node should be the empty
// set. If that isn't true then this is a bug. If it is true, then
// you can further check that in the NORM solution the "extra"
// points-to info in the HVN solution does in fact come from that
// purported cycle (if it doesn't, then this is still a bug). If
// you're doing the further check then you'll need to do it for
// each "extra" points-to element in the HVN points-to set.
//
// There are probably ways to optimize these checks by taking
// advantage of graph properties. For example, extraneous points-to
// info will flow through the graph and end up in many
// nodes. Rather than checking every node with extra info, you
// could probably work out the "origin point" of the extra info and
// just check there. Note that the check in the first bullet is
// looking for soundness bugs, while the check in the second bullet
// is looking for precision bugs; depending on your needs, you may
// care more about one than the other.
//
// which we should evaluate. The cross-check is nonetheless invaluable
// for all but one of the programs in the pointer_test suite.
import (
"fmt"
"go/types"
"io"
"reflect"
"golang.org/x/tools/container/intsets"
)
// A peLabel is a pointer-equivalence label: two nodes with the same
// peLabel have identical points-to solutions.
//
// The numbers are allocated consecutively like so:
// 	0	not a pointer
// 	1..N-1	addrConstraints (equals the constraint's .src field, hence sparse)
// 	...	offsetAddr constraints
// 	...	SCCs (with indirect nodes or multiple inputs)
//
// Each PE label denotes a set of pointers containing a single addr, a
// single offsetAddr, or some set of other PE labels.
//
type peLabel int
// An hvn holds the state of one run of the Hash-Value Numbering pass:
// the offline constraint graph, the PE labels assigned so far, and the
// working state of Tarjan's SCC algorithm.
type hvn struct {
	a        *analysis
	N        int                // len(a.nodes) immediately after constraint generation
	log      io.Writer          // (optional) log of HVN lemmas
	onodes   []*onode           // nodes of the offline graph
	label    peLabel            // the next available PE label
	hvnLabel map[string]peLabel // hash-value numbering (PE label) for each set of onodeids
	stack    []onodeid          // DFS stack
	index    int32              // next onode.index, from Tarjan's SCC algorithm
	// For each distinct offsetAddrConstraint (src, offset) pair,
	// offsetAddrLabels records a unique PE label >= N.
	offsetAddrLabels map[offsetAddr]peLabel
}
// An onodeid is the index of a node in the offline graph.
// (Currently the first N align with the main nodes,
// but this may change with HRU.)
type onodeid uint32
// An onode is a node in the offline constraint graph.
// (Where ambiguous, members of analysis.nodes are referred to as
// "main graph" nodes.)
//
// Edges in the offline constraint graph (edges and implicit) point to
// the source, i.e. against the flow of values: they are dependencies.
// Implicit edges are used for SCC computation, but not for gathering
// incoming labels.
//
type onode struct {
	rep onodeid // index of representative of SCC in offline constraint graph

	edges    intsets.Sparse // constraint edges X-->Y (this onode is X)
	implicit intsets.Sparse // implicit edges *X-->*Y (this onode is X)
	peLabels intsets.Sparse // set of peLabels are pointer-equivalent to this one
	indirect bool           // node has points-to relations not represented in graph

	// Tarjan's SCC algorithm
	index, lowlink int32 // Tarjan numbering
	scc            int32 // -ve => on stack; 0 => unvisited; +ve => node is root of a found SCC
}
// An offsetAddr is the memoization key for offsetAddrConstraint
// pre-labeling: a (pointer, field-offset) pair.
type offsetAddr struct {
	ptr    nodeid
	offset uint32
}
// nextLabel issues the next unused pointer-equivalence label.
func (h *hvn) nextLabel() peLabel {
	next := h.label + 1
	h.label = next
	return next
}
// ref(X) returns the index of the onode for *X.
// The *X onodes occupy the second half of the onodes slice.
func (h *hvn) ref(id onodeid) onodeid {
	n := onodeid(len(h.a.nodes))
	return id + n
}
// hvn computes pointer-equivalence labels (peLabels) using the Hash-based
// Value Numbering (HVN) algorithm described in Hardekopf & Lin, SAS'07.
//
// It builds an offline constraint graph of 2N onodes (one per main
// graph node plus one per corresponding ref() node), collapses its
// SCCs, assigns a PE label to each node, and finally rewrites the main
// constraint graph so that pointer-equivalent nodes share solver state.
//
func (a *analysis) hvn() {
	start("HVN")

	if a.log != nil {
		fmt.Fprintf(a.log, "\n\n==== Pointer equivalence optimization\n\n")
	}

	h := hvn{
		a:                a,
		N:                len(a.nodes),
		log:              a.log,
		hvnLabel:         make(map[string]peLabel),
		offsetAddrLabels: make(map[offsetAddr]peLabel),
	}

	if h.log != nil {
		fmt.Fprintf(h.log, "\nCreating offline graph nodes...\n")
	}

	// Create offline nodes.  The first N nodes correspond to main
	// graph nodes; the next N are their corresponding ref() nodes.
	h.onodes = make([]*onode, 2*h.N)
	for id := range a.nodes {
		id := onodeid(id)
		h.onodes[id] = &onode{}
		// ref() nodes are indirect by construction: their
		// points-to relations are not captured offline.
		h.onodes[h.ref(id)] = &onode{indirect: true}
	}

	// Each node initially represents just itself.
	for id, o := range h.onodes {
		o.rep = onodeid(id)
	}

	h.markIndirectNodes()

	// Reserve the first N PE labels for addrConstraints.
	h.label = peLabel(h.N)

	// Add offline constraint edges.
	if h.log != nil {
		fmt.Fprintf(h.log, "\nAdding offline graph edges...\n")
	}
	for _, c := range a.constraints {
		if debugHVNVerbose && h.log != nil {
			fmt.Fprintf(h.log, "; %s\n", c)
		}
		c.presolve(&h)
	}

	// Find and collapse SCCs.
	if h.log != nil {
		fmt.Fprintf(h.log, "\nFinding SCCs...\n")
	}
	h.index = 1
	for id, o := range h.onodes {
		if id > 0 && o.index == 0 {
			// Start depth-first search at each unvisited node.
			h.visit(onodeid(id))
		}
	}

	// Dump the solution
	// (NB: somewhat redundant with logging from simplify().)
	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\nPointer equivalences:\n")
		for id, o := range h.onodes {
			if id == 0 {
				continue
			}
			if id == int(h.N) {
				// Separator between main nodes and ref() nodes.
				fmt.Fprintf(h.log, "---\n")
			}
			fmt.Fprintf(h.log, "o%d\t", id)
			if o.rep != onodeid(id) {
				fmt.Fprintf(h.log, "rep=o%d", o.rep)
			} else {
				fmt.Fprintf(h.log, "p%d", o.peLabels.Min())
				if o.indirect {
					fmt.Fprint(h.log, " indirect")
				}
			}
			fmt.Fprintln(h.log)
		}
	}

	// Simplify the main constraint graph
	h.simplify()

	a.showCounts()

	stop("HVN")
}
// ---- constraint-specific rules ----

// dst := &src
func (c *addrConstraint) presolve(h *hvn) {
	// Each addressable object (src) seeds its own PE label,
	// numbered identically to the object node, hence label < N.
	lab := peLabel(c.src)
	if debugHVNVerbose && h.log != nil {
		// duplicate log messages are possible
		fmt.Fprintf(h.log, "\tcreate p%d: {&n%d}\n", lab, c.src)
	}
	dst, src := onodeid(c.dst), onodeid(c.src)

	// dst is known to point to src: give it the label.
	h.onodes[dst].peLabels.Insert(int(lab))
	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\to%d has p%d\n", dst, lab)
	}

	// Whatever is loaded via *dst may flow from src.
	h.addImplicitEdge(h.ref(dst), src) // *dst ~~> src.
}
// dst = src
func (c *copyConstraint) presolve(h *hvn) {
	dst, src := onodeid(c.dst), onodeid(c.src)
	h.addEdge(dst, src)                       // dst --> src
	h.addImplicitEdge(h.ref(dst), h.ref(src)) // *dst ~~> *src
}
// dst = *src + offset
func (c *loadConstraint) presolve(h *hvn) {
	dst, src := onodeid(c.dst), onodeid(c.src)
	if c.offset != 0 {
		// We don't interpret load-with-offset, e.g. results
		// of map value lookup, R-block of dynamic call, slice
		// copy/append, reflection.  The destination must
		// therefore be treated as indirect.
		h.markIndirect(dst, "load with offset")
		return
	}
	h.addEdge(dst, h.ref(src)) // dst --> *src
}
// *dst + offset = src
func (c *storeConstraint) presolve(h *hvn) {
	if c.offset != 0 {
		// We don't interpret store-with-offset.
		// See discussion of soundness at markIndirectNodes.
		return
	}
	dst, src := onodeid(c.dst), onodeid(c.src)
	h.onodes[h.ref(dst)].edges.Insert(int(src)) // *dst --> src
	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\to%d --> o%d\n", h.ref(dst), src)
	}
}
// dst = &src.offset
func (c *offsetAddrConstraint) presolve(h *hvn) {
	// Each distinct (addr, offset) pair gets one fresh PE label;
	// the map memoizes them, performing CSE in effect.
	key := offsetAddr{c.src, c.offset}
	lab, seen := h.offsetAddrLabels[key]
	if !seen {
		lab = h.nextLabel()
		h.offsetAddrLabels[key] = lab
		if debugHVNVerbose && h.log != nil {
			fmt.Fprintf(h.log, "\tcreate p%d: {&n%d.#%d}\n",
				lab, c.src, c.offset)
		}
	}

	// The destination carries exactly this label.
	h.onodes[c.dst].peLabels.Insert(int(lab))
	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\to%d has p%d\n", c.dst, lab)
	}
}
// dst = src.(typ) where typ is an interface
func (c *typeFilterConstraint) presolve(h *hvn) {
	// The filtered result is computed by the solver, not
	// represented by offline edges, so dst must be indirect.
	h.markIndirect(onodeid(c.dst), "typeFilter result")
}
// dst = src.(typ) where typ is concrete
func (c *untagConstraint) presolve(h *hvn) {
	// The untagged payload is produced by the solver, so every
	// node of the flattened result type must be indirect.
	first := onodeid(c.dst)
	last := first + onodeid(h.a.sizeof(c.typ))
	for id := first; id < last; id++ {
		h.markIndirect(id, "untag result")
	}
}
// dst = src.method(c.params...)
func (c *invokeConstraint) presolve(h *hvn) {
	// All methods are address-taken functions, so
	// their formal P-blocks were already marked indirect.
	sig := c.method.Type().(*types.Signature)

	// Mark the caller's targets node as indirect.
	h.markIndirect(onodeid(c.params), "invoke targets node")

	// Mark the caller's R-block as indirect.  It starts just
	// past the targets node and the parameter block.
	rstart := c.params + 1 + nodeid(h.a.sizeof(sig.Params()))
	rend := rstart + nodeid(h.a.sizeof(sig.Results()))
	for id := rstart; id < rend; id++ {
		h.markIndirect(onodeid(id), "invoke R-block")
	}
}
// markIndirectNodes marks as indirect nodes whose points-to relations
// are not entirely captured by the offline graph, including:
//
//    (a) All address-taken nodes (including the following nodes within
//        the same object).  This is described in the paper.
//
// The most subtle cause of indirect nodes is the generation of
// store-with-offset constraints since the offline graph doesn't
// represent them.  A global audit of constraint generation reveals the
// following uses of store-with-offset:
//
//    (b) genDynamicCall, for P-blocks of dynamically called functions,
//        to which dynamic copy edges will be added to them during
//        solving: from storeConstraint for standalone functions,
//        and from invokeConstraint for methods.
//        All such P-blocks must be marked indirect.
//    (c) MakeUpdate, to update the value part of a map object.
//        All MakeMap objects's value parts must be marked indirect.
//    (d) copyElems, to update the destination array.
//        All array elements must be marked indirect.
//
// Not all indirect marking happens here.  ref() nodes are marked
// indirect at construction, and each constraint's presolve() method may
// mark additional nodes.
//
func (h *hvn) markIndirectNodes() {
	// (a) all address-taken nodes, plus all nodes following them
	//     within the same object, since these may be indirectly
	//     stored or address-taken.
	for _, c := range h.a.constraints {
		if c, ok := c.(*addrConstraint); ok {
			start := h.a.enclosingObj(c.src)
			end := start + nodeid(h.a.nodes[start].obj.size)
			for id := c.src; id < end; id++ {
				h.markIndirect(onodeid(id), "A-T object")
			}
		}
	}

	// (b) P-blocks of all address-taken functions.
	for id := 0; id < h.N; id++ {
		obj := h.a.nodes[id].obj

		// TODO(adonovan): opt: if obj.cgn.fn is a method and
		// obj.cgn is not its shared contour, this is an
		// "inlined" static method call.  We needn't consider it
		// address-taken since no invokeConstraint will affect it.

		if obj != nil && obj.flags&otFunction != 0 && h.a.atFuncs[obj.cgn.fn] {
			// address-taken function
			if debugHVNVerbose && h.log != nil {
				fmt.Fprintf(h.log, "n%d is address-taken: %s\n", id, obj.cgn.fn)
			}
			h.markIndirect(onodeid(id), "A-T func identity")
			id++ // advance past the function's identity node
			sig := obj.cgn.fn.Signature
			psize := h.a.sizeof(sig.Params())
			if sig.Recv() != nil {
				psize += h.a.sizeof(sig.Recv().Type())
			}
			// Mark each node of the P-block (receiver + params).
			for end := id + int(psize); id < end; id++ {
				h.markIndirect(onodeid(id), "A-T func P-block")
			}
			// Compensate for the outer loop's id++, so the scan
			// resumes at the node after the P-block.
			id--
			continue
		}
	}

	// (c) all map objects' value fields.
	for _, id := range h.a.mapValues {
		h.markIndirect(onodeid(id), "makemap.value")
	}

	// (d) all array element objects.
	// TODO(adonovan): opt: can we do better?
	for id := 0; id < h.N; id++ {
		// Identity node for an object of array type?
		if tArray, ok := h.a.nodes[id].typ.(*types.Array); ok {
			// Mark the array element nodes indirect.
			// (Skip past the identity field.)
			for range h.a.flatten(tArray.Elem()) {
				id++
				h.markIndirect(onodeid(id), "array elem")
			}
		}
	}
}
// markIndirect flags onode oid as indirect: its points-to relations
// are not fully represented in the offline graph.  The comment is
// used only for logging.
func (h *hvn) markIndirect(oid onodeid, comment string) {
	o := h.onodes[oid]
	o.indirect = true
	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\to%d is indirect: %s\n", oid, comment)
	}
}
// addEdge adds an explicit edge dst-->src.
// Note the unusual convention: edges are dependency (contraflow) edges.
func (h *hvn) addEdge(odst, osrc onodeid) {
	o := h.onodes[odst]
	o.edges.Insert(int(osrc))
	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\to%d --> o%d\n", odst, osrc)
	}
}
// addImplicitEdge adds an implicit edge odst~~>osrc, used for SCC
// detection but not for gathering incoming PE labels.
func (h *hvn) addImplicitEdge(odst, osrc onodeid) {
	o := h.onodes[odst]
	o.implicit.Insert(int(osrc))
	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\to%d ~~> o%d\n", odst, osrc)
	}
}
// visit implements the depth-first search of Tarjan's SCC algorithm.
// Precondition: x is canonical.
//
// Nodes may be coalesced during the recursion, so x is re-canonicalized
// (h.find) after every recursive call; the inner shadowed xo/yo locals
// exist for the same reason.
func (h *hvn) visit(x onodeid) {
	h.checkCanonical(x)
	xo := h.onodes[x]
	xo.index = h.index
	xo.lowlink = h.index
	h.index++

	h.stack = append(h.stack, x) // push
	assert(xo.scc == 0, "node revisited")
	xo.scc = -1 // on stack

	// Both explicit and implicit edges participate in SCC detection.
	var deps []int
	deps = xo.edges.AppendTo(deps)
	deps = xo.implicit.AppendTo(deps)

	for _, y := range deps {
		// Loop invariant: x is canonical.

		y := h.find(onodeid(y))

		if x == y {
			continue // nodes already coalesced
		}

		xo := h.onodes[x]
		yo := h.onodes[y]

		switch {
		case yo.scc > 0:
			// y is already a collapsed SCC

		case yo.scc < 0:
			// y is on the stack, and thus in the current SCC.
			if yo.index < xo.lowlink {
				xo.lowlink = yo.index
			}

		default:
			// y is unvisited; visit it now.
			h.visit(y)
			// Note: x and y are now non-canonical.

			x = h.find(onodeid(x))

			if yo.lowlink < xo.lowlink {
				xo.lowlink = yo.lowlink
			}
		}
	}
	h.checkCanonical(x)

	// Is x the root of an SCC?
	if xo.lowlink == xo.index {
		// Coalesce all nodes in the SCC.
		if debugHVNVerbose && h.log != nil {
			fmt.Fprintf(h.log, "scc o%d\n", x)
		}
		for {
			// Pop y from stack.
			i := len(h.stack) - 1
			y := h.stack[i]
			h.stack = h.stack[:i]

			h.checkCanonical(x)
			xo := h.onodes[x]
			h.checkCanonical(y)
			yo := h.onodes[y]

			if xo == yo {
				// SCC is complete.
				xo.scc = 1
				h.labelSCC(x)
				break
			}
			h.coalesce(x, y)
		}
	}
}
// labelSCC assigns the final PE label to the (just-completed, coalesced)
// SCC whose representative is x: a fresh label if the SCC is indirect,
// otherwise a label determined by the set of labels flowing in along
// explicit edges.
// Precondition: x is canonical.
func (h *hvn) labelSCC(x onodeid) {
	h.checkCanonical(x)
	xo := h.onodes[x]
	xpe := &xo.peLabels

	// All indirect nodes get new labels.
	if xo.indirect {
		label := h.nextLabel()
		if debugHVNVerbose && h.log != nil {
			fmt.Fprintf(h.log, "\tcreate p%d: indirect SCC\n", label)
			fmt.Fprintf(h.log, "\to%d has p%d\n", x, label)
		}

		// Remove pre-labeling, in case a direct pre-labeled node was
		// merged with an indirect one.
		xpe.Clear()
		xpe.Insert(int(label))

		return
	}

	// Invariant: all peLabels sets are non-empty.
	// Those that are logically empty contain zero as their sole element.
	// No other set contains zero.

	// Find all labels coming in to the coalesced SCC node.
	for _, y := range xo.edges.AppendTo(nil) {
		y := h.find(onodeid(y))
		if y == x {
			continue // already coalesced
		}
		ype := &h.onodes[y].peLabels
		if debugHVNVerbose && h.log != nil {
			fmt.Fprintf(h.log, "\tedge from o%d = %s\n", y, ype)
		}

		if ype.IsEmpty() {
			if debugHVNVerbose && h.log != nil {
				fmt.Fprintf(h.log, "\tnode has no PE label\n")
			}
		}
		assert(!ype.IsEmpty(), "incoming node has no PE label")

		if ype.Has(0) {
			// {0} represents a non-pointer.
			assert(ype.Len() == 1, "PE set contains {0, ...}")
		} else {
			xpe.UnionWith(ype)
		}
	}

	switch xpe.Len() {
	case 0:
		// SCC has no incoming non-zero PE labels: it is a non-pointer.
		xpe.Insert(0)

	case 1:
		// already a singleton

	default:
		// SCC has multiple incoming non-zero PE labels.
		// Find the canonical label representing this set.
		// We use String() as a fingerprint consistent with Equals().
		key := xpe.String()
		label, ok := h.hvnLabel[key]
		if !ok {
			label = h.nextLabel()
			if debugHVNVerbose && h.log != nil {
				fmt.Fprintf(h.log, "\tcreate p%d: union %s\n", label, xpe.String())
			}
			h.hvnLabel[key] = label
		}
		xpe.Clear()
		xpe.Insert(int(label))
	}

	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\to%d has p%d\n", x, xpe.Min())
	}
}
// coalesce combines two nodes in the offline constraint graph.
// Precondition: x and y are canonical.
func (h *hvn) coalesce(x, y onodeid) {
	xo, yo := h.onodes[x], h.onodes[y]

	// x becomes y's canonical representative.
	yo.rep = x

	if debugHVNVerbose && h.log != nil {
		fmt.Fprintf(h.log, "\tcoalesce o%d into o%d\n", y, x)
	}

	// Fold each of y's attributes into x, emptying y as we go.
	xo.edges.UnionWith(&yo.edges)
	yo.edges.Clear()

	xo.implicit.UnionWith(&yo.implicit)
	yo.implicit.Clear()

	xo.peLabels.UnionWith(&yo.peLabels)
	yo.peLabels.Clear()

	// Indirectness is contagious within a coalesced SCC.
	xo.indirect = xo.indirect || yo.indirect
}
// simplify computes a degenerate renumbering of nodeids from the PE
// labels assigned by the hvn, and uses it to simplify the main
// constraint graph, eliminating non-pointer nodes and duplicate
// constraints.
//
func (h *hvn) simplify() {
	// canon maps each peLabel to its canonical main node.
	canon := make([]nodeid, h.label)
	for i := range canon {
		canon[i] = nodeid(h.N) // indicates "unset"
	}

	// mapping maps each main node index to the index of the canonical node.
	mapping := make([]nodeid, len(h.a.nodes))

	for id := range h.a.nodes {
		id := nodeid(id)
		if id == 0 {
			// node 0 (the non-pointer) is its own canonical node.
			canon[0] = 0
			mapping[0] = 0
			continue
		}
		oid := h.find(onodeid(id))
		peLabels := &h.onodes[oid].peLabels
		assert(peLabels.Len() == 1, "PE class is not a singleton")
		label := peLabel(peLabels.Min())

		canonId := canon[label]
		if canonId == nodeid(h.N) {
			// id becomes the representative of the PE label.
			canonId = id
			canon[label] = canonId

			if h.a.log != nil {
				fmt.Fprintf(h.a.log, "\tpts(n%d) is canonical : \t(%s)\n",
					id, h.a.nodes[id].typ)
			}

		} else {
			// Link the solver states for the two nodes.
			assert(h.a.nodes[canonId].solve != nil, "missing solver state")
			h.a.nodes[id].solve = h.a.nodes[canonId].solve

			if h.a.log != nil {
				// TODO(adonovan): debug: reorganize the log so it prints
				// one line:
				// 	pe y = x1, ..., xn
				// for each canonical y.  Requires allocation.
				fmt.Fprintf(h.a.log, "\tpts(n%d) = pts(n%d) : %s\n",
					id, canonId, h.a.nodes[id].typ)
			}
		}

		mapping[id] = canonId
	}

	// Renumber the constraints, eliminate duplicates, and eliminate
	// any containing non-pointers (n0).
	addrs := make(map[addrConstraint]bool)
	copys := make(map[copyConstraint]bool)
	loads := make(map[loadConstraint]bool)
	stores := make(map[storeConstraint]bool)
	offsetAddrs := make(map[offsetAddrConstraint]bool)
	untags := make(map[untagConstraint]bool)
	typeFilters := make(map[typeFilterConstraint]bool)
	invokes := make(map[invokeConstraint]bool)

	nbefore := len(h.a.constraints)
	cc := h.a.constraints[:0] // in-situ compaction
	for _, c := range h.a.constraints {
		// Renumber.
		switch c := c.(type) {
		case *addrConstraint:
			// Don't renumber c.src since it is the label of
			// an addressable object and will appear in PT sets.
			c.dst = mapping[c.dst]
		default:
			c.renumber(mapping)
		}

		if c.ptr() == 0 {
			continue // skip: constraint attached to non-pointer
		}

		// De-duplicate each kind of constraint via a map of its
		// (renumbered) value; dup is true if already seen.
		var dup bool
		switch c := c.(type) {
		case *addrConstraint:
			_, dup = addrs[*c]
			addrs[*c] = true

		case *copyConstraint:
			if c.src == c.dst {
				continue // skip degenerate copies
			}
			if c.src == 0 {
				continue // skip copy from non-pointer
			}
			_, dup = copys[*c]
			copys[*c] = true

		case *loadConstraint:
			if c.src == 0 {
				continue // skip load from non-pointer
			}
			_, dup = loads[*c]
			loads[*c] = true

		case *storeConstraint:
			if c.src == 0 {
				continue // skip store from non-pointer
			}
			_, dup = stores[*c]
			stores[*c] = true

		case *offsetAddrConstraint:
			if c.src == 0 {
				continue // skip offset from non-pointer
			}
			_, dup = offsetAddrs[*c]
			offsetAddrs[*c] = true

		case *untagConstraint:
			if c.src == 0 {
				continue // skip untag of non-pointer
			}
			_, dup = untags[*c]
			untags[*c] = true

		case *typeFilterConstraint:
			if c.src == 0 {
				continue // skip filter of non-pointer
			}
			_, dup = typeFilters[*c]
			typeFilters[*c] = true

		case *invokeConstraint:
			if c.params == 0 {
				panic("non-pointer invoke.params")
			}
			if c.iface == 0 {
				continue // skip invoke on non-pointer
			}
			_, dup = invokes[*c]
			invokes[*c] = true

		default:
			// We don't bother de-duping advanced constraints
			// (e.g. reflection) since they are uncommon.

			// Eliminate constraints containing non-pointer nodeids.
			//
			// We use reflection to find the fields to avoid
			// adding yet another method to constraint.
			//
			// TODO(adonovan): experiment with a constraint
			// method that returns a slice of pointers to
			// nodeids fields to enable uniform iteration;
			// the renumber() method could be removed and
			// implemented using the new one.
			//
			// TODO(adonovan): opt: this is unsound since
			// some constraints still have an effect if one
			// of the operands is zero: rVCall, rVMapIndex,
			// rvSetMapIndex.  Handle them specially.
			rtNodeid := reflect.TypeOf(nodeid(0))
			x := reflect.ValueOf(c).Elem()
			for i, nf := 0, x.NumField(); i < nf; i++ {
				f := x.Field(i)
				if f.Type() == rtNodeid {
					if f.Uint() == 0 {
						dup = true // skip it
						break
					}
				}
			}
		}
		if dup {
			continue // skip duplicates
		}

		cc = append(cc, c)
	}
	h.a.constraints = cc

	if h.log != nil {
		fmt.Fprintf(h.log, "#constraints: was %d, now %d\n", nbefore, len(h.a.constraints))
	}
}
// find returns the canonical onodeid for x.
// (The onodes form a disjoint set forest.)
func (h *hvn) find(x onodeid) onodeid {
	// TODO(adonovan): opt: this is a CPU hotspot.  Try "union by rank".
	o := h.onodes[x]
	if o.rep == x {
		return x // x is its own representative
	}
	root := h.find(o.rep)
	o.rep = root // simple path compression
	return root
}
// checkCanonical asserts (in debug builds only) that x is its own
// representative in the disjoint-set forest.
func (h *hvn) checkCanonical(x onodeid) {
	if debugHVN {
		assert(h.find(x) == x, "not canonical")
	}
}
// assert panics with msg if p is false; it is a no-op unless the
// debugHVN build flag is enabled.
func assert(p bool, msg string) {
	if !debugHVN {
		return
	}
	if !p {
		panic("assertion failed: " + msg)
	}
}

361
vendor/golang.org/x/tools/go/pointer/intrinsics.go generated vendored Normal file
View File

@@ -0,0 +1,361 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
// This package defines the treatment of intrinsics, i.e. library
// functions requiring special analytical treatment.
//
// Most of these are C or assembly functions, but even some Go
// functions may require special treatment if the analysis completely
// replaces the implementation of an API such as reflection.
// TODO(adonovan): support a means of writing analytic summaries in
// the target code, so that users can summarise the effects of their
// own C functions using a snippet of Go.
import (
"fmt"
"go/types"
"golang.org/x/tools/go/ssa"
)
// Instances of 'intrinsic' generate analysis constraints for calls to
// intrinsic functions.
// Implementations may exploit information from the calling site
// via cgn.callersite; for shared contours this is nil.
type intrinsic func(a *analysis, cgn *cgnode)

// intrinsicsByName maps the name of each intrinsic function (as
// produced by ssa.Function.String()) to its constraint generator.
// Initialized in explicit init() to defeat (spurious) initialization
// cycle error.
var intrinsicsByName = make(map[string]intrinsic)
// init populates intrinsicsByName.  Almost all entries are
// ext۰NoEffect (no aliasing consequences); the two exceptions,
// runtime.SetFinalizer and time.startTimer, model a deferred dynamic
// call to a function value passed by the caller.
func init() {
	// Key strings are from Function.String().
	// That little dot ۰ is an Arabic zero numeral (U+06F0),
	// categories [Nd].
	for name, fn := range map[string]intrinsic{
		// Other packages.
		"bytes.Equal":                           ext۰NoEffect,
		"bytes.IndexByte":                       ext۰NoEffect,
		"crypto/aes.decryptBlockAsm":            ext۰NoEffect,
		"crypto/aes.encryptBlockAsm":            ext۰NoEffect,
		"crypto/aes.expandKeyAsm":               ext۰NoEffect,
		"crypto/aes.hasAsm":                     ext۰NoEffect,
		"crypto/md5.block":                      ext۰NoEffect,
		"crypto/rc4.xorKeyStream":               ext۰NoEffect,
		"crypto/sha1.block":                     ext۰NoEffect,
		"crypto/sha256.block":                   ext۰NoEffect,
		"hash/crc32.castagnoliSSE42":            ext۰NoEffect,
		"hash/crc32.haveSSE42":                  ext۰NoEffect,
		"math.Abs":                              ext۰NoEffect,
		"math.Acos":                             ext۰NoEffect,
		"math.Asin":                             ext۰NoEffect,
		"math.Atan":                             ext۰NoEffect,
		"math.Atan2":                            ext۰NoEffect,
		"math.Ceil":                             ext۰NoEffect,
		"math.Cos":                              ext۰NoEffect,
		"math.Dim":                              ext۰NoEffect,
		"math.Exp":                              ext۰NoEffect,
		"math.Exp2":                             ext۰NoEffect,
		"math.Expm1":                            ext۰NoEffect,
		"math.Float32bits":                      ext۰NoEffect,
		"math.Float32frombits":                  ext۰NoEffect,
		"math.Float64bits":                      ext۰NoEffect,
		"math.Float64frombits":                  ext۰NoEffect,
		"math.Floor":                            ext۰NoEffect,
		"math.Frexp":                            ext۰NoEffect,
		"math.Hypot":                            ext۰NoEffect,
		"math.Ldexp":                            ext۰NoEffect,
		"math.Log":                              ext۰NoEffect,
		"math.Log10":                            ext۰NoEffect,
		"math.Log1p":                            ext۰NoEffect,
		"math.Log2":                             ext۰NoEffect,
		"math.Max":                              ext۰NoEffect,
		"math.Min":                              ext۰NoEffect,
		"math.Mod":                              ext۰NoEffect,
		"math.Modf":                             ext۰NoEffect,
		"math.Remainder":                        ext۰NoEffect,
		"math.Sin":                              ext۰NoEffect,
		"math.Sincos":                           ext۰NoEffect,
		"math.Sqrt":                             ext۰NoEffect,
		"math.Tan":                              ext۰NoEffect,
		"math.Trunc":                            ext۰NoEffect,
		"math/big.addMulVVW":                    ext۰NoEffect,
		"math/big.addVV":                        ext۰NoEffect,
		"math/big.addVW":                        ext۰NoEffect,
		"math/big.bitLen":                       ext۰NoEffect,
		"math/big.divWVW":                       ext۰NoEffect,
		"math/big.divWW":                        ext۰NoEffect,
		"math/big.mulAddVWW":                    ext۰NoEffect,
		"math/big.mulWW":                        ext۰NoEffect,
		"math/big.shlVU":                        ext۰NoEffect,
		"math/big.shrVU":                        ext۰NoEffect,
		"math/big.subVV":                        ext۰NoEffect,
		"math/big.subVW":                        ext۰NoEffect,
		"net.runtime_Semacquire":                ext۰NoEffect,
		"net.runtime_Semrelease":                ext۰NoEffect,
		"net.runtime_pollClose":                 ext۰NoEffect,
		"net.runtime_pollOpen":                  ext۰NoEffect,
		"net.runtime_pollReset":                 ext۰NoEffect,
		"net.runtime_pollServerInit":            ext۰NoEffect,
		"net.runtime_pollSetDeadline":           ext۰NoEffect,
		"net.runtime_pollUnblock":               ext۰NoEffect,
		"net.runtime_pollWait":                  ext۰NoEffect,
		"net.runtime_pollWaitCanceled":          ext۰NoEffect,
		"os.epipecheck":                         ext۰NoEffect,
		// All other runtime functions are treated as NoEffect.
		"runtime.SetFinalizer":                  ext۰runtime۰SetFinalizer,
		"strings.IndexByte":                     ext۰NoEffect,
		"sync.runtime_Semacquire":               ext۰NoEffect,
		"sync.runtime_Semrelease":               ext۰NoEffect,
		"sync.runtime_Syncsemacquire":           ext۰NoEffect,
		"sync.runtime_Syncsemcheck":             ext۰NoEffect,
		"sync.runtime_Syncsemrelease":           ext۰NoEffect,
		"sync.runtime_procPin":                  ext۰NoEffect,
		"sync.runtime_procUnpin":                ext۰NoEffect,
		"sync.runtime_registerPool":             ext۰NoEffect,
		"sync/atomic.AddInt32":                  ext۰NoEffect,
		"sync/atomic.AddInt64":                  ext۰NoEffect,
		"sync/atomic.AddUint32":                 ext۰NoEffect,
		"sync/atomic.AddUint64":                 ext۰NoEffect,
		"sync/atomic.AddUintptr":                ext۰NoEffect,
		"sync/atomic.CompareAndSwapInt32":       ext۰NoEffect,
		"sync/atomic.CompareAndSwapUint32":      ext۰NoEffect,
		"sync/atomic.CompareAndSwapUint64":      ext۰NoEffect,
		"sync/atomic.CompareAndSwapUintptr":     ext۰NoEffect,
		"sync/atomic.LoadInt32":                 ext۰NoEffect,
		"sync/atomic.LoadInt64":                 ext۰NoEffect,
		"sync/atomic.LoadPointer":               ext۰NoEffect, // ignore unsafe.Pointers
		"sync/atomic.LoadUint32":                ext۰NoEffect,
		"sync/atomic.LoadUint64":                ext۰NoEffect,
		"sync/atomic.LoadUintptr":               ext۰NoEffect,
		"sync/atomic.StoreInt32":                ext۰NoEffect,
		"sync/atomic.StorePointer":              ext۰NoEffect, // ignore unsafe.Pointers
		"sync/atomic.StoreUint32":               ext۰NoEffect,
		"sync/atomic.StoreUintptr":              ext۰NoEffect,
		"syscall.Close":                         ext۰NoEffect,
		"syscall.Exit":                          ext۰NoEffect,
		"syscall.Getpid":                        ext۰NoEffect,
		"syscall.Getwd":                         ext۰NoEffect,
		"syscall.Kill":                          ext۰NoEffect,
		"syscall.RawSyscall":                    ext۰NoEffect,
		"syscall.RawSyscall6":                   ext۰NoEffect,
		"syscall.Syscall":                       ext۰NoEffect,
		"syscall.Syscall6":                      ext۰NoEffect,
		"syscall.runtime_AfterFork":             ext۰NoEffect,
		"syscall.runtime_BeforeFork":            ext۰NoEffect,
		"syscall.setenv_c":                      ext۰NoEffect,
		"time.Sleep":                            ext۰NoEffect,
		"time.now":                              ext۰NoEffect,
		"time.startTimer":                       ext۰time۰startTimer,
		"time.stopTimer":                        ext۰NoEffect,
	} {
		intrinsicsByName[name] = fn
	}
}
// findIntrinsic returns the constraint generation function for an
// intrinsic function fn, or nil if the function should be handled normally.
//
func (a *analysis) findIntrinsic(fn *ssa.Function) intrinsic {
	// Consult the *Function-keyed cache.
	// A cached nil indicates a normal non-intrinsic function.
	if impl, ok := a.intrinsics[fn]; ok {
		return impl
	}

	impl := intrinsicsByName[fn.String()] // may be nil
	switch {
	case a.isReflect(fn):
		if !a.config.Reflection {
			impl = ext۰NoEffect // reflection disabled
		} else if impl == nil {
			// Ensure all "reflect" code is treated intrinsically.
			impl = ext۰NotYetImplemented
		}
	case impl == nil && fn.Pkg != nil && fn.Pkg.Pkg.Path() == "runtime":
		// Ignore "runtime" (except SetFinalizer):
		// it has few interesting effects on aliasing
		// and is full of unsafe code we can't analyze.
		impl = ext۰NoEffect
	}

	a.intrinsics[fn] = impl
	return impl
}
// isReflect reports whether fn belongs to the "reflect" package.
func (a *analysis) isReflect(fn *ssa.Function) bool {
	if a.reflectValueObj == nil {
		return false // "reflect" package not loaded
	}
	reflectPkg := a.reflectValueObj.Pkg()
	if fn.Pkg != nil && fn.Pkg.Pkg == reflectPkg {
		return true
	}

	// Synthetic wrappers have a nil Pkg, so they slip through the
	// previous check.  Check the receiver package.
	// TODO(adonovan): should synthetic wrappers have a non-nil Pkg?
	recv := fn.Signature.Recv()
	if recv == nil {
		return false
	}
	named, ok := deref(recv.Type()).(*types.Named)
	// e.g. a wrapper of (reflect.Value).f has a reflect receiver.
	return ok && named.Obj().Pkg() == reflectPkg
}
// ext۰NoEffect generates no constraints at all.
//
// It is a trivial intrinsic suitable for any function that does not:
// 1) induce aliases between its arguments or any global variables;
// 2) call any functions; or
// 3) create any labels.
//
// Many intrinsics (such as CompareAndSwapInt32) have a fourth kind of
// effect: loading or storing through a pointer.  Though these could
// be significant, we deliberately ignore them because they are
// generally not worth the effort.
//
// We sometimes violate condition #3 if the function creates only
// non-function labels, as the control-flow graph is still sound.
//
func ext۰NoEffect(a *analysis, cgn *cgnode) {}
// ext۰NotYetImplemented warns that the analysis result may be unsound
// because the named intrinsic has no model yet.
func ext۰NotYetImplemented(a *analysis, cgn *cgnode) {
	a.warnf(cgn.fn.Pos(), "unsound: intrinsic treatment of %s not yet implemented", cgn.fn)
}
// ---------- func runtime.SetFinalizer(x, f interface{}) ----------

// runtime.SetFinalizer(x, f)
//
// A runtimeSetFinalizerConstraint records one call to the
// runtime.SetFinalizer intrinsic; solving it models the eventual
// dynamic call f(x).
type runtimeSetFinalizerConstraint struct {
	targets nodeid // call-targets node of the modelled call f(x) (indirect)
	f       nodeid // the finalizer function value (ptr)
	x       nodeid // the object to be finalized
}

// ptr returns the constraint's pointer operand, the finalizer f.
func (c *runtimeSetFinalizerConstraint) ptr() nodeid { return c.f }

// presolve marks targets indirect: its points-to relation is
// established by the solver, not by the offline (HVN) graph.
func (c *runtimeSetFinalizerConstraint) presolve(h *hvn) {
	h.markIndirect(onodeid(c.targets), "SetFinalizer.targets")
}

// renumber remaps all nodeids through the HVN renumbering.
func (c *runtimeSetFinalizerConstraint) renumber(mapping []nodeid) {
	c.targets = mapping[c.targets]
	c.f = mapping[c.f]
	c.x = mapping[c.x]
}

func (c *runtimeSetFinalizerConstraint) String() string {
	return fmt.Sprintf("runtime.SetFinalizer(n%d, n%d)", c.x, c.f)
}
// solve processes each new function value f added to the points-to set
// of c.f, modelling the deferred dynamic call f(x).
func (c *runtimeSetFinalizerConstraint) solve(a *analysis, delta *nodeset) {
	for _, fObj := range delta.AppendTo(a.deltaSpace) {
		tDyn, f, indirect := a.taggedValue(nodeid(fObj))
		if indirect {
			// TODO(adonovan): we'll need to implement this
			// when we start creating indirect tagged objects.
			panic("indirect tagged object")
		}

		tSig, ok := tDyn.Underlying().(*types.Signature)
		if !ok {
			continue // not a function
		}
		if tSig.Recv() != nil {
			panic(tSig) // finalizers cannot be methods
		}
		if tSig.Params().Len() != 1 {
			continue // not a unary function
		}

		// Extract x to tmp, narrowed to the finalizer's parameter type.
		tx := tSig.Params().At(0).Type()
		tmp := a.addNodes(tx, "SetFinalizer.tmp")
		a.typeAssert(tx, tmp, c.x, false)

		// Call f(tmp): copy tmp into f's P-block (offset 1 skips identity).
		a.store(f, tmp, 1, a.sizeof(tx))

		// Add dynamic call target.
		if a.onlineCopy(c.targets, f) {
			a.addWork(c.targets)
		}
	}
}
// ext۰runtime۰SetFinalizer generates the constraint for the shared
// contour of runtime.SetFinalizer, used for dynamic calls.
func ext۰runtime۰SetFinalizer(a *analysis, cgn *cgnode) {
	targets := a.addOneNode(tInvalid, "SetFinalizer.targets", nil)
	cgn.sites = append(cgn.sites, &callsite{targets: targets})

	// P-block layout of SetFinalizer(x, f): x at +0, f at +1.
	params := a.funcParams(cgn.obj)
	c := &runtimeSetFinalizerConstraint{
		targets: targets,
		x:       params + 0,
		f:       params + 1,
	}
	a.addConstraint(c)
}
// ---------- func time.startTimer(t *runtimeTimer) ----------

// time.StartTimer(t)
//
// A timeStartTimerConstraint records one call to the time.startTimer
// intrinsic; solving it models the eventual dynamic call t.f(t.arg).
type timeStartTimerConstraint struct {
	targets nodeid // call-targets node of the modelled call t.f(t.arg) (indirect)
	t       nodeid // the *runtimeTimer argument (ptr)
}

// ptr returns the constraint's pointer operand, the timer t.
func (c *timeStartTimerConstraint) ptr() nodeid { return c.t }

// presolve marks targets indirect: its points-to relation is
// established by the solver, not by the offline (HVN) graph.
func (c *timeStartTimerConstraint) presolve(h *hvn) {
	h.markIndirect(onodeid(c.targets), "StartTimer.targets")
}

// renumber remaps all nodeids through the HVN renumbering.
func (c *timeStartTimerConstraint) renumber(mapping []nodeid) {
	c.targets = mapping[c.targets]
	c.t = mapping[c.t]
}

func (c *timeStartTimerConstraint) String() string {
	return fmt.Sprintf("time.startTimer(n%d)", c.t)
}
// solve processes each new timer object t added to the points-to set
// of c.t, modelling the deferred dynamic call t.f(t.arg).
func (c *timeStartTimerConstraint) solve(a *analysis, delta *nodeset) {
	for _, tObj := range delta.AppendTo(a.deltaSpace) {
		t := nodeid(tObj)

		// We model startTimer as if it was defined thus:
		// 	func startTimer(t *runtimeTimer) { t.f(t.arg) }

		// We hard-code the field offsets of time.runtimeTimer:
		// type runtimeTimer struct {
		//  0     __identity__
		//  1    i int32
		//  2    when int64
		//  3    period int64
		//  4    f func(int64, interface{})
		//  5    arg interface{}
		// }
		f := t + 4
		arg := t + 5

		// store t.arg to t.f.params[0]
		// (offset 1 => skip identity)
		a.store(f, arg, 1, 1)

		// Add dynamic call target.
		if a.onlineCopy(c.targets, f) {
			a.addWork(c.targets)
		}
	}
}
// ext۰time۰startTimer models calls to time.startTimer by installing a
// timeStartTimerConstraint on the call's parameter node.
func ext۰time۰startTimer(a *analysis, cgn *cgnode) {
	// This is the shared contour, used for dynamic calls.
	targets := a.addOneNode(tInvalid, "startTimer.targets", nil)
	cgn.sites = append(cgn.sites, &callsite{targets: targets})
	params := a.funcParams(cgn.obj)
	a.addConstraint(&timeStartTimerConstraint{
		targets: targets,
		t:       params, // the *runtimeTimer parameter
	})
}

152
vendor/golang.org/x/tools/go/pointer/labels.go generated vendored Normal file
View File

@@ -0,0 +1,152 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
import (
"fmt"
"go/token"
"go/types"
"strings"
"golang.org/x/tools/go/ssa"
)
// A Label is an entity that may be pointed to by a pointer, map,
// channel, 'func', slice or interface.
//
// Labels include:
// - functions
// - globals
// - tagged objects, representing interfaces and reflect.Values
// - arrays created by conversions (e.g. []byte("foo"), []byte(s))
// - stack- and heap-allocated variables (including composite literals)
// - channels, maps and arrays created by make()
// - intrinsic or reflective operations that allocate (e.g. append, reflect.New)
// - intrinsic objects, e.g. the initial array behind os.Args.
// - and their subelements, e.g. "alloc.y[*].z"
//
// Labels are so varied that they defy good generalizations;
// some have no value, no callgraph node, or no position.
// Many objects have types that are inexpressible in Go:
// maps, channels, functions, tagged objects.
//
// At most one of Value() or ReflectType() may return non-nil.
//
// A Label pairs an allocated object with a subelement path within it;
// see the package comment above for the varieties of object.
type Label struct {
	obj        *object    // the addressable memory location containing this label
	subelement *fieldInfo // subelement path within obj, e.g. ".a.b[*].c"
}
// Value returns the ssa.Value that allocated this label's object, if any.
// It returns nil when the object's data is not an ssa.Value
// (e.g. a reflect.rtype instance or an intrinsic object).
func (l Label) Value() ssa.Value {
	val, _ := l.obj.data.(ssa.Value)
	return val
}
// ReflectType returns the type represented by this label if it is a
// reflect.rtype instance object or *reflect.rtype-tagged object.
// It returns nil for all other label kinds.
//
func (l Label) ReflectType() types.Type {
	rtype, _ := l.obj.data.(types.Type)
	return rtype
}
// Path returns the path to the subelement of the object containing
// this label. For example, ".x[*].y".
// The path is computed by the label's fieldInfo subelement.
//
func (l Label) Path() string {
	return l.subelement.path()
}
// Pos returns the position of this label, if known, zero otherwise.
func (l Label) Pos() token.Pos {
	switch data := l.obj.data.(type) {
	case ssa.Value:
		return data.Pos()
	case types.Type:
		// An rtype instance: use the position of the named type, if any.
		if nt, ok := deref(data).(*types.Named); ok {
			return nt.Obj().Pos()
		}
	}
	// Fall back to the position of the enclosing function, if any.
	if cgn := l.obj.cgn; cgn != nil {
		return cgn.fn.Pos()
	}
	return token.NoPos
}
// String returns the printed form of this label.
//
// Examples:                       Object type:
//	x                          (a variable)
//	(sync.Mutex).Lock          (a function)
//	convert                    (array created by conversion)
//	makemap                    (map allocated via make)
//	makechan                   (channel allocated via make)
//	makeinterface              (tagged object allocated by makeinterface)
//	<alloc in reflect.Zero>    (allocation in intrinsic)
//	sync.Mutex                 (a reflect.rtype instance)
//	<command-line arguments>   (an intrinsic object)
//
// Labels within compound objects have subelement paths:
//	x.y[*].z                   (a struct variable, x)
//	append.y[*].z              (array allocated by append)
//	makeslice.y[*].z           (array allocated via make)
//
// TODO(adonovan): expose func LabelString(*types.Package, Label).
//
func (l Label) String() string {
	var s string
	switch v := l.obj.data.(type) {
	case types.Type:
		// a reflect.rtype instance
		return v.String()

	case string:
		s = v // an intrinsic object (e.g. os.Args[*])

	case nil:
		if l.obj.cgn != nil {
			// allocation by intrinsic or reflective operation
			s = fmt.Sprintf("<alloc in %s>", l.obj.cgn.fn)
		} else {
			s = "<unknown>" // should be unreachable
		}

	case *ssa.Function:
		s = v.String()

	case *ssa.Global:
		s = v.String()

	case *ssa.Const:
		s = v.Name()

	case *ssa.Alloc:
		s = v.Comment
		if s == "" {
			s = "alloc"
		}

	case *ssa.Call:
		// Currently only calls to append can allocate objects.
		if v.Call.Value.(*ssa.Builtin).Object().Name() != "append" {
			panic("unhandled *ssa.Call label: " + v.Name())
		}
		s = "append"

	case *ssa.MakeMap, *ssa.MakeChan, *ssa.MakeSlice, *ssa.Convert:
		// Name the label after the instruction kind, e.g. "makemap".
		s = strings.ToLower(strings.TrimPrefix(fmt.Sprintf("%T", v), "*ssa."))

	case *ssa.MakeInterface:
		// MakeInterface is usually implicit in Go source (so
		// Pos()==0), and tagged objects may be allocated
		// synthetically (so no *MakeInterface data).
		s = "makeinterface:" + v.X.Type().String()

	default:
		panic(fmt.Sprintf("unhandled object data type: %T", v))
	}

	return s + l.subelement.path()
}

132
vendor/golang.org/x/tools/go/pointer/opt.go generated vendored Normal file
View File

@@ -0,0 +1,132 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
// This file implements renumbering, a pre-solver optimization to
// improve the efficiency of the solver's points-to set representation.
//
// TODO(adonovan): rename file "renumber.go"
import "fmt"
// renumber permutes a.nodes so that all nodes within an addressable
// object appear before all non-addressable nodes, maintaining the
// order of nodes within the same object (as required by offsetAddr).
//
// renumber must update every nodeid in the analysis (constraints,
// Pointers, callgraph, etc) to reflect the new ordering.
//
// This is an optimisation to increase the locality and efficiency of
// sparse representations of points-to sets. (Typically only about
// 20% of nodes are within an object.)
//
// NB: nodes added during solving (e.g. for reflection, SetFinalizer)
// will be appended to the end.
//
// Renumbering makes the PTA log inscrutable. To aid debugging, later
// phases (e.g. HVN) must not rely on it having occurred.
//
// renumber builds the permutation described in the comment above —
// object nodes first, then non-object nodes — and rewrites every
// nodeid held anywhere in the analysis state to the new numbering.
//
// Fixes: dropped the redundant cap argument in the make calls
// (cap == len is implied) and renamed the log-loop variables so the
// predeclared identifier `new` is not shadowed.
func (a *analysis) renumber() {
	if a.log != nil {
		fmt.Fprintf(a.log, "\n\n==== Renumbering\n\n")
	}

	N := nodeid(len(a.nodes))
	newNodes := make([]*node, N)     // a.nodes, permuted
	renumbering := make([]nodeid, N) // maps old nodeid to new

	var i, j nodeid

	// The zero node is special.
	newNodes[j] = a.nodes[i]
	renumbering[i] = j
	i++
	j++

	// Pass 1: object nodes.
	for i < N {
		obj := a.nodes[i].obj
		if obj == nil {
			i++
			continue
		}

		// Keep the nodes of one object contiguous and in order
		// (required by offsetAddr).
		end := i + nodeid(obj.size)
		for i < end {
			newNodes[j] = a.nodes[i]
			renumbering[i] = j
			i++
			j++
		}
	}
	nobj := j

	// Pass 2: non-object nodes.
	for i = 1; i < N; {
		obj := a.nodes[i].obj
		if obj != nil {
			i += nodeid(obj.size) // skip object nodes (placed in pass 1)
			continue
		}
		newNodes[j] = a.nodes[i]
		renumbering[i] = j
		i++
		j++
	}

	if j != N {
		panic(fmt.Sprintf("internal error: j=%d, N=%d", j, N))
	}

	// Log the remapping table.
	if a.log != nil {
		fmt.Fprintf(a.log, "Renumbering nodes to improve density:\n")
		fmt.Fprintf(a.log, "(%d object nodes of %d total)\n", nobj, N)
		for before, after := range renumbering {
			fmt.Fprintf(a.log, "\tn%d -> n%d\n", before, after)
		}
	}

	// Now renumber all existing nodeids to use the new node permutation.
	// It is critical that all reachable nodeids are accounted for!

	// Renumber nodeids in queried Pointers.
	for v, ptr := range a.result.Queries {
		ptr.n = renumbering[ptr.n]
		a.result.Queries[v] = ptr
	}
	for v, ptr := range a.result.IndirectQueries {
		ptr.n = renumbering[ptr.n]
		a.result.IndirectQueries[v] = ptr
	}
	for _, queries := range a.config.extendedQueries {
		for _, query := range queries {
			if query.ptr != nil {
				query.ptr.n = renumbering[query.ptr.n]
			}
		}
	}

	// Renumber nodeids in global objects.
	for v, id := range a.globalobj {
		a.globalobj[v] = renumbering[id]
	}

	// Renumber nodeids in constraints.
	for _, c := range a.constraints {
		c.renumber(renumbering)
	}

	// Renumber nodeids in the call graph.
	for _, cgn := range a.cgnodes {
		cgn.obj = renumbering[cgn.obj]
		for _, site := range cgn.sites {
			site.targets = renumbering[site.targets]
		}
	}

	a.nodes = newNodes
}

601
vendor/golang.org/x/tools/go/pointer/pointer_test.go generated vendored Normal file
View File

@@ -0,0 +1,601 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// No testdata on Android.
// +build !android
package pointer_test
// This test uses 'expectation' comments embedded within testdata/*.go
// files to specify the expected pointer analysis behaviour.
// See below for grammar.
import (
"bytes"
"errors"
"fmt"
"go/token"
"go/types"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"testing"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/pointer"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
"golang.org/x/tools/go/types/typeutil"
)
// inputs lists the testdata files whose embedded expectation comments
// are checked by TestInput.
var inputs = []string{
	"testdata/a_test.go",
	"testdata/another.go",
	"testdata/arrayreflect.go",
	"testdata/arrays.go",
	"testdata/channels.go",
	"testdata/chanreflect.go",
	"testdata/context.go",
	"testdata/conv.go",
	"testdata/extended.go",
	"testdata/finalizer.go",
	"testdata/flow.go",
	"testdata/fmtexcerpt.go",
	"testdata/func.go",
	"testdata/funcreflect.go",
	"testdata/hello.go", // NB: causes spurious failure of HVN cross-check
	"testdata/interfaces.go",
	"testdata/issue9002.go",
	"testdata/mapreflect.go",
	"testdata/maps.go",
	"testdata/panic.go",
	"testdata/recur.go",
	"testdata/reflect.go",
	"testdata/rtti.go",
	"testdata/structreflect.go",
	"testdata/structs.go",
	// "testdata/timer.go", // TODO(adonovan): fix broken assumptions about runtime timers
}
// Expectation grammar:
//
// @calls f -> g
//
// A 'calls' expectation asserts that edge (f, g) appears in the
// callgraph. f and g are notated as per Function.String(), which
// may contain spaces (e.g. promoted method in anon struct).
//
// @pointsto a | b | c
//
// A 'pointsto' expectation asserts that the points-to set of its
// operand contains exactly the set of labels {a,b,c} notated as per
// labelString.
//
// A 'pointsto' expectation must appear on the same line as a
// print(x) statement; the expectation's operand is x.
//
// If one of the strings is "...", the expectation asserts that the
// points-to set at least the other labels.
//
// We use '|' because label names may contain spaces, e.g. methods
// of anonymous structs.
//
// From a theoretical perspective, concrete types in interfaces are
// labels too, but they are represented differently and so have a
// different expectation, @types, below.
//
// @types t | u | v
//
// A 'types' expectation asserts that the set of possible dynamic
// types of its interface operand is exactly {t,u,v}, notated per
// go/types.Type.String(). In other words, it asserts that the type
// component of the interface may point to that set of concrete type
// literals. It also works for reflect.Value, though the types
// needn't be concrete in that case.
//
// A 'types' expectation must appear on the same line as a
// print(x) statement; the expectation's operand is x.
//
// If one of the strings is "...", the expectation asserts that the
// interface's type may point to at least the other types.
//
// We use '|' because type names may contain spaces.
//
// @warning "regexp"
//
// A 'warning' expectation asserts that the analysis issues a
// warning that matches the regular expression within the string
// literal.
//
// @line id
//
// A line directive associates the name "id" with the current
// file:line. The string form of labels will use this id instead of
// a file:line, making @pointsto expectations more robust against
// perturbations in the source file.
// (NB, anon functions still include line numbers.)
//
// An expectation is one parsed @-assertion from a testdata file;
// see the grammar in the comment above.
type expectation struct {
	kind     string // "pointsto" | "pointstoquery" | "types" | "calls" | "warning"
	filename string // file containing the expectation comment
	linenum  int    // source line number, 1-based
	args     []string
	query    string           // extended query
	extended *pointer.Pointer // extended query pointer
	types    []types.Type     // for types
}
// String returns a printable form of the expectation,
// e.g. "@pointsto[a | b]".
func (e *expectation) String() string {
	return fmt.Sprintf("@%s[%s]", e.kind, strings.Join(e.args, " | "))
}
// errorf reports an expectation failure to standard output, prefixed
// by the expectation's file and line.
func (e *expectation) errorf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	fmt.Printf("%s:%d: %s\n", e.filename, e.linenum, msg)
}
// needsProbe reports whether this expectation kind requires a
// print(x) probe on the same source line.
func (e *expectation) needsProbe() bool {
	return e.kind == "pointsto" || e.kind == "pointstoquery" || e.kind == "types"
}
// findProbe finds the probe (call to print(x)) on the same source
// file/line as expectation e, returning the call site and the
// points-to set of its argument; site is nil if no probe matches.
func findProbe(prog *ssa.Program, probes map[*ssa.CallCommon]bool, queries map[ssa.Value]pointer.Pointer, e *expectation) (site *ssa.CallCommon, pts pointer.PointsToSet) {
	for call := range probes {
		pos := prog.Fset.Position(call.Pos())
		if pos.Line == e.linenum && pos.Filename == e.filename {
			// TODO(adonovan): send this to test log (display only on failure).
			// fmt.Printf("%s:%d: info: found probe for %s: %s\n",
			//      e.filename, e.linenum, e, p.arg0) // debugging
			return call, queries[call.Args[0]].PointsTo()
		}
	}
	return // e.g. analysis didn't reach this call
}
// doOneInput loads, builds, and analyzes a single testdata file, then
// checks every expectation embedded in its comments. It reports
// success; failure details are printed to stdout, and the PTA log is
// dumped to stderr on failure or panic.
func doOneInput(input, filename string) bool {
	var conf loader.Config

	// Parsing.
	f, err := conf.ParseFile(filename, input)
	if err != nil {
		fmt.Println(err)
		return false
	}

	// Create single-file main package and import its dependencies.
	conf.CreateFromFiles("main", f)
	iprog, err := conf.Load()
	if err != nil {
		fmt.Println(err)
		return false
	}
	mainPkgInfo := iprog.Created[0].Pkg

	// SSA creation + building.
	prog := ssautil.CreateProgram(iprog, ssa.SanityCheckFunctions)
	prog.Build()
	mainpkg := prog.Package(mainPkgInfo)
	ptrmain := mainpkg // main package for the pointer analysis
	if mainpkg.Func("main") == nil {
		// No main function; assume it's a test.
		ptrmain = prog.CreateTestMainPackage(mainpkg)
	}

	// Find all calls to the built-in print(x). Analytically,
	// print is a no-op, but it's a convenient hook for testing
	// the PTS of an expression, so our tests use it.
	probes := make(map[*ssa.CallCommon]bool)
	for fn := range ssautil.AllFunctions(prog) {
		if fn.Pkg == mainpkg {
			for _, b := range fn.Blocks {
				for _, instr := range b.Instrs {
					if instr, ok := instr.(ssa.CallInstruction); ok {
						call := instr.Common()
						if b, ok := call.Value.(*ssa.Builtin); ok && b.Name() == "print" && len(call.Args) == 1 {
							probes[instr.Common()] = true
						}
					}
				}
			}
		}
	}

	ok := true

	lineMapping := make(map[string]string) // maps "file:line" to @line tag

	// Parse expectations in this input.
	var exps []*expectation
	re := regexp.MustCompile("// *@([a-z]*) *(.*)$")
	lines := strings.Split(input, "\n")
	for linenum, line := range lines {
		linenum++ // make it 1-based
		if matches := re.FindAllStringSubmatch(line, -1); matches != nil {
			match := matches[0]
			kind, rest := match[1], match[2]
			e := &expectation{kind: kind, filename: filename, linenum: linenum}

			if kind == "line" {
				if rest == "" {
					ok = false
					e.errorf("@%s expectation requires identifier", kind)
				} else {
					lineMapping[fmt.Sprintf("%s:%d", filename, linenum)] = rest
				}
				continue
			}

			if e.needsProbe() && !strings.Contains(line, "print(") {
				ok = false
				e.errorf("@%s expectation must follow call to print(x)", kind)
				continue
			}

			switch kind {
			case "pointsto":
				e.args = split(rest, "|")

			case "pointstoquery":
				args := strings.SplitN(rest, " ", 2)
				e.query = args[0]
				e.args = split(args[1], "|")

			case "types":
				for _, typstr := range split(rest, "|") {
					var t types.Type = types.Typ[types.Invalid] // means "..."
					if typstr != "..." {
						tv, err := types.Eval(prog.Fset, mainpkg.Pkg, f.Pos(), typstr)
						if err != nil {
							ok = false
							// Don't print err since its location is bad.
							e.errorf("'%s' is not a valid type: %s", typstr, err)
							continue
						}
						t = tv.Type
					}
					e.types = append(e.types, t)
				}

			case "calls":
				e.args = split(rest, "->")
				// TODO(adonovan): eagerly reject the
				// expectation if fn doesn't denote
				// existing function, rather than fail
				// the expectation after analysis.
				if len(e.args) != 2 {
					ok = false
					e.errorf("@calls expectation wants 'caller -> callee' arguments")
					continue
				}

			case "warning":
				lit, err := strconv.Unquote(strings.TrimSpace(rest))
				if err != nil {
					ok = false
					e.errorf("couldn't parse @warning operand: %s", err.Error())
					continue
				}
				e.args = append(e.args, lit)

			default:
				ok = false
				e.errorf("unknown expectation kind: %s", e)
				continue
			}
			exps = append(exps, e)
		}
	}

	var log bytes.Buffer
	fmt.Fprintf(&log, "Input: %s\n", filename)

	// Run the analysis.
	config := &pointer.Config{
		Reflection:     true,
		BuildCallGraph: true,
		Mains:          []*ssa.Package{ptrmain},
		Log:            &log,
	}
	// Register a query for each probe; pointstoquery expectations use
	// an extended query instead of a plain one.
probeLoop:
	for probe := range probes {
		v := probe.Args[0]
		pos := prog.Fset.Position(probe.Pos())
		for _, e := range exps {
			if e.linenum == pos.Line && e.filename == pos.Filename && e.kind == "pointstoquery" {
				var err error
				e.extended, err = config.AddExtendedQuery(v, e.query)
				if err != nil {
					panic(err)
				}
				continue probeLoop
			}
		}
		if pointer.CanPoint(v.Type()) {
			config.AddQuery(v)
		}
	}

	// Print the log if there was an error or a panic.
	complete := false
	defer func() {
		if !complete || !ok {
			log.WriteTo(os.Stderr)
		}
	}()

	result, err := pointer.Analyze(config)
	if err != nil {
		panic(err) // internal error in pointer analysis
	}

	// Check the expectations.
	for _, e := range exps {
		var call *ssa.CallCommon
		var pts pointer.PointsToSet
		var tProbe types.Type
		if e.needsProbe() {
			if call, pts = findProbe(prog, probes, result.Queries, e); call == nil {
				ok = false
				e.errorf("unreachable print() statement has expectation %s", e)
				continue
			}
			if e.extended != nil {
				pts = e.extended.PointsTo()
			}
			tProbe = call.Args[0].Type()
			if !pointer.CanPoint(tProbe) {
				ok = false
				e.errorf("expectation on non-pointerlike operand: %s", tProbe)
				continue
			}
		}

		switch e.kind {
		case "pointsto", "pointstoquery":
			if !checkPointsToExpectation(e, pts, lineMapping, prog) {
				ok = false
			}

		case "types":
			if !checkTypesExpectation(e, pts, tProbe) {
				ok = false
			}

		case "calls":
			if !checkCallsExpectation(prog, e, result.CallGraph) {
				ok = false
			}

		case "warning":
			if !checkWarningExpectation(prog, e, result.Warnings) {
				ok = false
			}
		}
	}

	complete = true

	// ok = false // debugging: uncomment to always see log

	return ok
}
// labelString returns the printed form of label l, suffixed with its
// source position unless the label is a function, global, or
// intrinsic allocation. If an @line tag is defined for the position,
// it replaces the file:line part of the suffix.
func labelString(l *pointer.Label, lineMapping map[string]string, prog *ssa.Program) string {
	// Functions and Globals need no pos suffix,
	// nor do allocations in intrinsic operations
	// (for which we'll print the function name).
	switch l.Value().(type) {
	case nil, *ssa.Function, *ssa.Global:
		return l.String()
	}

	str := l.String()
	if pos := l.Pos(); pos != token.NoPos {
		// Append the position, using a @line tag instead of a line number, if defined.
		posn := prog.Fset.Position(pos)
		s := fmt.Sprintf("%s:%d", posn.Filename, posn.Line)
		if tag, ok := lineMapping[s]; ok {
			return fmt.Sprintf("%s@%s:%d", str, tag, posn.Column)
		}
		str = fmt.Sprintf("%s@%s", str, posn)
	}
	return str
}
// checkPointsToExpectation checks a @pointsto/@pointstoquery
// expectation: the labels of pts must equal e.args as a multiset,
// unless e.args contains "..." which permits extra labels.
func checkPointsToExpectation(e *expectation, pts pointer.PointsToSet, lineMapping map[string]string, prog *ssa.Program) bool {
	expected := make(map[string]int)
	surplus := make(map[string]int)
	exact := true
	for _, g := range e.args {
		if g == "..." {
			exact = false
			continue
		}
		expected[g]++
	}
	// Find the set of labels that the probe's
	// argument (x in print(x)) may point to.
	for _, label := range pts.Labels() {
		name := labelString(label, lineMapping, prog)
		if expected[name] > 0 {
			expected[name]--
		} else if exact {
			surplus[name]++
		}
	}
	// Report multiset difference:
	ok := true
	for _, count := range expected {
		if count > 0 {
			ok = false
			e.errorf("value does not alias these expected labels: %s", join(expected))
			break
		}
	}
	for _, count := range surplus {
		if count > 0 {
			ok = false
			e.errorf("value may additionally alias these labels: %s", join(surplus))
			break
		}
	}
	return ok
}
// checkTypesExpectation checks a @types expectation: the set of
// dynamic types in pts must equal e.types, unless "..." (recorded as
// types.Invalid) permits extra types.
func checkTypesExpectation(e *expectation, pts pointer.PointsToSet, typ types.Type) bool {
	var expected typeutil.Map
	var surplus typeutil.Map
	exact := true
	for _, g := range e.types {
		if g == types.Typ[types.Invalid] {
			exact = false
			continue
		}
		expected.Set(g, struct{}{})
	}

	if !pointer.CanHaveDynamicTypes(typ) {
		e.errorf("@types expectation requires an interface- or reflect.Value-typed operand, got %s", typ)
		return false
	}

	// Find the set of types that the probe's
	// argument (x in print(x)) may contain.
	for _, T := range pts.DynamicTypes().Keys() {
		if expected.At(T) != nil {
			expected.Delete(T)
		} else if exact {
			surplus.Set(T, struct{}{})
		}
	}
	// Report set difference:
	ok := true
	if expected.Len() > 0 {
		ok = false
		e.errorf("interface cannot contain these types: %s", expected.KeysString())
	}
	if surplus.Len() > 0 {
		ok = false
		e.errorf("interface may additionally contain these types: %s", surplus.KeysString())
	}
	return ok
}
// errOK is a sentinel used to stop callgraph traversal early once the
// sought edge has been found.
var errOK = errors.New("OK")
// checkCallsExpectation checks a @calls expectation: the callgraph
// must contain an edge from function e.args[0] to function e.args[1].
//
// Fix: the two failure messages were previously both printed when no
// calls from the caller were found (the second with an empty callee
// list); they are now mutually exclusive.
func checkCallsExpectation(prog *ssa.Program, e *expectation, cg *callgraph.Graph) bool {
	found := make(map[string]int)
	err := callgraph.GraphVisitEdges(cg, func(edge *callgraph.Edge) error {
		// Name-based matching is inefficient but it allows us to
		// match functions whose names that would not appear in an
		// index ("<root>") or which are not unique ("func@1.2").
		if edge.Caller.Func.String() == e.args[0] {
			calleeStr := edge.Callee.Func.String()
			if calleeStr == e.args[1] {
				return errOK // expectation satisfied; stop the search
			}
			found[calleeStr]++
		}
		return nil
	})
	if err == errOK {
		return true
	}
	if len(found) == 0 {
		e.errorf("didn't find any calls from %s", e.args[0])
	} else {
		e.errorf("found no call from %s to %s, but only to %s",
			e.args[0], e.args[1], join(found))
	}
	return false
}
// checkWarningExpectation checks a @warning expectation: at least one
// analysis warning must match the regular expression in e.args[0].
// TODO(adonovan): check the position part of the warning too?
func checkWarningExpectation(prog *ssa.Program, e *expectation, warnings []pointer.Warning) bool {
	re, err := regexp.Compile(e.args[0])
	if err != nil {
		e.errorf("invalid regular expression in @warning expectation: %s", err.Error())
		return false
	}

	if len(warnings) == 0 {
		e.errorf("@warning %q expectation, but no warnings", e.args[0])
		return false
	}

	for _, w := range warnings {
		if re.MatchString(w.Message) {
			return true
		}
	}

	// No match: dump the warnings we did get to aid debugging.
	e.errorf("@warning %q expectation not satisfied; found these warnings though:", e.args[0])
	for _, w := range warnings {
		fmt.Printf("%s: warning: %s\n", prog.Fset.Position(w.Pos), w.Message)
	}
	return false
}
// TestInput runs the pointer analysis over every file in inputs and
// checks the expectations embedded in each. It is skipped in -short
// mode because the analysis is very memory-hungry.
func TestInput(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode; this test requires tons of memory; golang.org/issue/14113")
	}
	ok := true

	wd, err := os.Getwd()
	if err != nil {
		t.Errorf("os.Getwd: %s", err)
		return
	}

	// 'go test' does a chdir so that relative paths in
	// diagnostics no longer make sense relative to the invoking
	// shell's cwd. We print a special marker so that Emacs can
	// make sense of them.
	fmt.Fprintf(os.Stderr, "Entering directory `%s'\n", wd)

	for _, filename := range inputs {
		content, err := ioutil.ReadFile(filename)
		if err != nil {
			t.Errorf("couldn't read file '%s': %s", filename, err)
			continue
		}

		if !doOneInput(string(content), filename) {
			ok = false
		}
	}
	if !ok {
		t.Fail()
	}
}
// join joins the elements of multiset with " | "s.
// Each name appears once per its count.
func join(set map[string]int) string {
	var parts []string
	for name, count := range set {
		for i := 0; i < count; i++ {
			parts = append(parts, name)
		}
	}
	return strings.Join(parts, " | ")
}
// split returns the list of sep-delimited non-empty strings in s,
// with surrounding whitespace trimmed from each element.
func split(s, sep string) (r []string) {
	for _, elem := range strings.Split(s, sep) {
		if trimmed := strings.TrimSpace(elem); trimmed != "" {
			r = append(r, trimmed)
		}
	}
	return
}

43
vendor/golang.org/x/tools/go/pointer/print.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
import "fmt"
// String returns the log form of the constraint, e.g. "addr n1 <- {&n2}".
func (c *addrConstraint) String() string {
	return fmt.Sprintf("addr n%d <- {&n%d}", c.dst, c.src)
}
// String returns the log form of the constraint, e.g. "copy n1 <- n2".
func (c *copyConstraint) String() string {
	return fmt.Sprintf("copy n%d <- n%d", c.dst, c.src)
}
// String returns the log form of the constraint, e.g. "load n1 <- n2[3]".
func (c *loadConstraint) String() string {
	return fmt.Sprintf("load n%d <- n%d[%d]", c.dst, c.src, c.offset)
}
// String returns the log form of the constraint, e.g. "store n1[3] <- n2".
func (c *storeConstraint) String() string {
	return fmt.Sprintf("store n%d[%d] <- n%d", c.dst, c.offset, c.src)
}
// String returns the log form of the constraint, e.g. "offsetAddr n1 <- n2.#3".
func (c *offsetAddrConstraint) String() string {
	return fmt.Sprintf("offsetAddr n%d <- n%d.#%d", c.dst, c.src, c.offset)
}
// String returns the log form of the constraint, e.g. "typeFilter n1 <- n2.(T)".
func (c *typeFilterConstraint) String() string {
	return fmt.Sprintf("typeFilter n%d <- n%d.(%s)", c.dst, c.src, c.typ)
}
// String returns the log form of the constraint, e.g. "untag n1 <- n2.(T)".
func (c *untagConstraint) String() string {
	return fmt.Sprintf("untag n%d <- n%d.(%s)", c.dst, c.src, c.typ)
}
// String returns the log form of the constraint, e.g. "invoke n1.M(n2 ...)".
func (c *invokeConstraint) String() string {
	return fmt.Sprintf("invoke n%d.%s(n%d ...)", c.iface, c.method.Name(), c.params)
}
// String returns the printed form of a node id, e.g. "n42".
func (n nodeid) String() string {
	return fmt.Sprintf("n%d", n)
}

221
vendor/golang.org/x/tools/go/pointer/query.go generated vendored Normal file
View File

@@ -0,0 +1,221 @@
package pointer
import (
"errors"
"fmt"
"go/ast"
"go/parser"
"go/token"
"go/types"
"strconv"
)
// An extendedQuery represents a sequence of destructuring operations
// applied to an ssa.Value (denoted by "x").
type extendedQuery struct {
	ops []interface{} // e.g. ["x", "load", "field", 0]; see destructuringOps
	ptr *Pointer      // the resulting query pointer
}
// indexValue returns the value of an integer literal used as an
// index.
func indexValue(expr ast.Expr) (int, error) {
lit, ok := expr.(*ast.BasicLit)
if !ok {
return 0, fmt.Errorf("non-integer index (%T)", expr)
}
if lit.Kind != token.INT {
return 0, fmt.Errorf("non-integer index %s", lit.Value)
}
return strconv.Atoi(lit.Value)
}
// parseExtendedQuery parses and validates a destructuring Go
// expression and returns the sequence of destructuring operations
// and the type of the final value. See destructuringOps for details.
//
// The query must begin with the identifier "x" (the value being
// queried) and must ultimately denote a pointer-like value.
func parseExtendedQuery(typ types.Type, query string) ([]interface{}, types.Type, error) {
	expr, err := parser.ParseExpr(query)
	if err != nil {
		return nil, nil, err
	}
	ops, typ, err := destructuringOps(typ, expr)
	if err != nil {
		return nil, nil, err
	}
	if len(ops) == 0 {
		return nil, nil, errors.New("invalid query: must not be empty")
	}
	if ops[0] != "x" {
		return nil, nil, fmt.Errorf("invalid query: query operand must be named x")
	}
	if !CanPoint(typ) {
		return nil, nil, fmt.Errorf("query does not describe a pointer-like value: %s", typ)
	}
	return ops, typ, nil
}
// destructuringOps parses a Go expression consisting only of an
// identifier "x", field selections, indexing, channel receives, load
// operations and parens---for example: "<-(*x[i])[key]"--- and
// returns the sequence of destructuring operations on x.
// It also returns the type of the resulting value, or an error if the
// expression cannot be applied to a value of type typ.
func destructuringOps(typ types.Type, expr ast.Expr) ([]interface{}, types.Type, error) {
	switch expr := expr.(type) {
	case *ast.SelectorExpr:
		// Field selection x.f, possibly through a pointer.
		out, typ, err := destructuringOps(typ, expr.X)
		if err != nil {
			return nil, nil, err
		}

		var structT *types.Struct
		switch typ := typ.(type) {
		case *types.Pointer:
			var ok bool
			structT, ok = typ.Elem().Underlying().(*types.Struct)
			if !ok {
				return nil, nil, fmt.Errorf("cannot access field %s of pointer to type %s", expr.Sel.Name, typ.Elem())
			}

			out = append(out, "load")
		case *types.Struct:
			structT = typ
		default:
			return nil, nil, fmt.Errorf("cannot access field %s of type %s", expr.Sel.Name, typ)
		}

		for i := 0; i < structT.NumFields(); i++ {
			field := structT.Field(i)
			if field.Name() == expr.Sel.Name {
				out = append(out, "field", i)
				return out, field.Type().Underlying(), nil
			}
		}
		// TODO(dh): supporting embedding would need something like
		// types.LookupFieldOrMethod, but without taking package
		// boundaries into account, because we may want to access
		// unexported fields. If we were only interested in one level
		// of unexported name, we could determine the appropriate
		// package and run LookupFieldOrMethod with that. However, a
		// single query may want to cross multiple package boundaries,
		// and at this point it's not really worth the complexity.
		return nil, nil, fmt.Errorf("no field %s in %s (embedded fields must be resolved manually)", expr.Sel.Name, structT)
	case *ast.Ident:
		// The query operand itself (expected to be "x").
		return []interface{}{expr.Name}, typ, nil
	case *ast.BasicLit:
		return []interface{}{expr.Value}, nil, nil
	case *ast.IndexExpr:
		out, typ, err := destructuringOps(typ, expr.X)
		if err != nil {
			return nil, nil, err
		}
		switch typ := typ.(type) {
		case *types.Array:
			out = append(out, "arrayelem")
			return out, typ.Elem().Underlying(), nil
		case *types.Slice:
			out = append(out, "sliceelem")
			return out, typ.Elem().Underlying(), nil
		case *types.Map:
			out = append(out, "mapelem")
			return out, typ.Elem().Underlying(), nil
		case *types.Tuple:
			// Tuple indices must be integer literals and in range.
			out = append(out, "index")
			idx, err := indexValue(expr.Index)
			if err != nil {
				return nil, nil, err
			}
			out = append(out, idx)
			if idx >= typ.Len() || idx < 0 {
				return nil, nil, fmt.Errorf("tuple index %d out of bounds", idx)
			}
			return out, typ.At(idx).Type().Underlying(), nil
		default:
			return nil, nil, fmt.Errorf("cannot index type %s", typ)
		}

	case *ast.UnaryExpr:
		// Only channel receive (<-x) is supported.
		if expr.Op != token.ARROW {
			return nil, nil, fmt.Errorf("unsupported unary operator %s", expr.Op)
		}
		out, typ, err := destructuringOps(typ, expr.X)
		if err != nil {
			return nil, nil, err
		}
		ch, ok := typ.(*types.Chan)
		if !ok {
			return nil, nil, fmt.Errorf("cannot receive from value of type %s", typ)
		}
		out = append(out, "recv")
		return out, ch.Elem().Underlying(), err
	case *ast.ParenExpr:
		return destructuringOps(typ, expr.X)
	case *ast.StarExpr:
		// Pointer dereference *x.
		out, typ, err := destructuringOps(typ, expr.X)
		if err != nil {
			return nil, nil, err
		}
		ptr, ok := typ.(*types.Pointer)
		if !ok {
			return nil, nil, fmt.Errorf("cannot dereference type %s", typ)
		}
		out = append(out, "load")
		return out, ptr.Elem().Underlying(), err
	default:
		return nil, nil, fmt.Errorf("unsupported expression %T", expr)
	}
}
// evalExtendedQuery walks the sequence of destructuring operations
// ops (as returned by parseExtendedQuery), generating analysis
// constraints that simulate each operation, and returns the type and
// node id of the final value. ops[0] is the operand "x" itself and is
// skipped.
func (a *analysis) evalExtendedQuery(t types.Type, id nodeid, ops []interface{}) (types.Type, nodeid) {
	pid := id
	// TODO(dh): we're allocating intermediary nodes each time
	// evalExtendedQuery is called. We should probably only generate
	// them once per (v, ops) pair.
	for i := 1; i < len(ops); i++ {
		var nid nodeid
		switch ops[i] {
		case "recv":
			t = t.(*types.Chan).Elem().Underlying()
			nid = a.addNodes(t, "query.extended")
			a.load(nid, pid, 0, a.sizeof(t))
		case "field":
			i++ // fetch field index
			tt := t.(*types.Struct)
			idx := ops[i].(int)
			offset := a.offsetOf(t, idx)
			t = tt.Field(idx).Type().Underlying()
			nid = a.addNodes(t, "query.extended")
			a.copy(nid, pid+nodeid(offset), a.sizeof(t))
		case "arrayelem":
			t = t.(*types.Array).Elem().Underlying()
			nid = a.addNodes(t, "query.extended")
			a.copy(nid, 1+pid, a.sizeof(t)) // offset 1 => skip identity
		case "sliceelem":
			t = t.(*types.Slice).Elem().Underlying()
			nid = a.addNodes(t, "query.extended")
			a.load(nid, pid, 1, a.sizeof(t))
		case "mapelem":
			// Load the value, skipping over the key nodes.
			tt := t.(*types.Map)
			t = tt.Elem()
			ksize := a.sizeof(tt.Key())
			vsize := a.sizeof(tt.Elem())
			nid = a.addNodes(t, "query.extended")
			a.load(nid, pid, ksize, vsize)
		case "index":
			i++ // fetch index
			tt := t.(*types.Tuple)
			idx := ops[i].(int)
			t = tt.At(idx).Type().Underlying()
			nid = a.addNodes(t, "query.extended")
			a.copy(nid, pid+nodeid(idx), a.sizeof(t))
		case "load":
			t = t.(*types.Pointer).Elem().Underlying()
			nid = a.addNodes(t, "query.extended")
			a.load(nid, pid, 0, a.sizeof(t))
		default:
			// shouldn't happen
			panic(fmt.Sprintf("unknown op %q", ops[i]))
		}
		pid = nid
	}

	return t, pid
}

68
vendor/golang.org/x/tools/go/pointer/query_test.go generated vendored Normal file
View File

@@ -0,0 +1,68 @@
package pointer
import (
"go/ast"
"go/parser"
"go/token"
"go/types"
"reflect"
"testing"
)
// TestParseExtendedQuery exercises parseExtendedQuery against a table
// of valid and invalid destructuring expressions, checking both the
// returned op sequence and the error behaviour.
func TestParseExtendedQuery(t *testing.T) {
	const myprog = `
package pkg
var V1 *int
var V2 **int
var V3 []*int
var V4 chan []*int
var V5 struct {F1, F2 chan *int}
var V6 [1]chan *int
var V7 int
`
	tests := []struct {
		in    string        // query expression
		out   []interface{} // expected op sequence (nil if invalid)
		v     string        // variable whose type is queried
		valid bool
	}{
		{`x`, []interface{}{"x"}, "V1", true},
		{`*x`, []interface{}{"x", "load"}, "V2", true},
		{`x[0]`, []interface{}{"x", "sliceelem"}, "V3", true},
		{`<-x`, []interface{}{"x", "recv"}, "V4", true},
		{`(<-x)[0]`, []interface{}{"x", "recv", "sliceelem"}, "V4", true},
		{`<-x.F2`, []interface{}{"x", "field", 1, "recv"}, "V5", true},
		{`<-x[0]`, []interface{}{"x", "arrayelem", "recv"}, "V6", true},
		{`x`, nil, "V7", false},
		{`y`, nil, "V1", false},
		{`x; x`, nil, "V1", false},
		{`x()`, nil, "V1", false},
		{`close(x)`, nil, "V1", false},
	}

	// Type-check the fixture so each V* has a types.Type.
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "file.go", myprog, 0)
	if err != nil {
		t.Fatal(err)
	}
	cfg := &types.Config{}
	pkg, err := cfg.Check("main", fset, []*ast.File{f}, nil)
	if err != nil {
		t.Fatal(err)
	}

	for _, test := range tests {
		typ := pkg.Scope().Lookup(test.v).Type().Underlying()
		ops, _, err := parseExtendedQuery(typ, test.in)
		if test.valid && err != nil {
			t.Errorf("parseExtendedQuery(%q) = %s, expected no error", test.in, err)
		}
		if !test.valid && err == nil {
			t.Errorf("parseExtendedQuery(%q) succeeded, expected error", test.in)
		}

		if !reflect.DeepEqual(ops, test.out) {
			t.Errorf("parseExtendedQuery(%q) = %#v, want %#v", test.in, ops, test.out)
		}
	}
}

1975
vendor/golang.org/x/tools/go/pointer/reflect.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

370
vendor/golang.org/x/tools/go/pointer/solve.go generated vendored Normal file
View File

@@ -0,0 +1,370 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
// This file defines a naive Andersen-style solver for the inclusion
// constraint system.
import (
"fmt"
"go/types"
)
// solverState holds the per-node working state of the naive
// Andersen-style solver: the node's points-to set, its outgoing
// copy edges, and the complex constraints that must be re-applied
// whenever the set grows.
type solverState struct {
	complex []constraint // complex constraints attached to this node
	copyTo  nodeset      // simple copy constraint edges
	pts     nodeset      // points-to set of this node
	prevPTS nodeset      // pts(n) in previous iteration (for difference propagation)
}
// solve runs the worklist-based inclusion-constraint solver to a
// fixed point: it repeatedly takes a node from the worklist,
// computes the new portion ("delta") of its points-to set, and
// propagates delta along the node's attached constraints, until the
// worklist is empty and no new constraints remain.
func (a *analysis) solve() {
	start("Solving")
	if a.log != nil {
		fmt.Fprintf(a.log, "\n\n==== Solving constraints\n\n")
	}
	// Solver main loop.
	var delta nodeset
	for {
		// Add new constraints to the graph:
		// static constraints from SSA on round 1,
		// dynamic constraints from reflection thereafter.
		a.processNewConstraints()
		var x int
		if !a.work.TakeMin(&x) {
			break // empty
		}
		id := nodeid(x)
		if a.log != nil {
			fmt.Fprintf(a.log, "\tnode n%d\n", id)
		}
		n := a.nodes[id]
		// Difference propagation: delta is the set of labels added
		// to pts(n) since n was last processed, so constraints are
		// never re-applied to labels they have already seen.
		delta.Difference(&n.solve.pts.Sparse, &n.solve.prevPTS.Sparse)
		if delta.IsEmpty() {
			continue
		}
		if a.log != nil {
			fmt.Fprintf(a.log, "\t\tpts(n%d : %s) = %s + %s\n",
				id, n.typ, &delta, &n.solve.prevPTS)
		}
		n.solve.prevPTS.Copy(&n.solve.pts.Sparse)
		// Apply all resolution rules attached to n.
		a.solveConstraints(n, &delta)
		if a.log != nil {
			fmt.Fprintf(a.log, "\t\tpts(n%d) = %s\n", id, &n.solve.pts)
		}
	}
	// Internal invariant: nothing may ever flow into node 0.
	if !a.nodes[0].solve.pts.IsEmpty() {
		panic(fmt.Sprintf("pts(0) is nonempty: %s", &a.nodes[0].solve.pts))
	}
	// Release working state (but keep final PTS).
	for _, n := range a.nodes {
		n.solve.complex = nil
		n.solve.copyTo.Clear()
		n.solve.prevPTS.Clear()
	}
	if a.log != nil {
		fmt.Fprintf(a.log, "Solver done\n")
		// Dump solution.
		for i, n := range a.nodes {
			if !n.solve.pts.IsEmpty() {
				fmt.Fprintf(a.log, "pts(n%d) = %s : %s\n", i, &n.solve.pts, n.typ)
			}
		}
	}
	stop("Solving")
}
// processNewConstraints takes the new constraints from a.constraints
// and adds them to the graph, ensuring
// that new constraints are applied to pre-existing labels and
// that pre-existing constraints are applied to new labels.
//
func (a *analysis) processNewConstraints() {
	// Take the slice of new constraints.
	// (May grow during call to solveConstraints.)
	constraints := a.constraints
	a.constraints = nil
	// Pass 1: initialize points-to sets from addr-of (base) constraints.
	for _, c := range constraints {
		if c, ok := c.(*addrConstraint); ok {
			dst := a.nodes[c.dst]
			dst.solve.pts.add(c.src)
			// Populate the worklist with nodes that point to
			// something initially (due to addrConstraints) and
			// have other constraints attached.
			// (A no-op in round 1.)
			if !dst.solve.copyTo.IsEmpty() || len(dst.solve.complex) > 0 {
				a.addWork(c.dst)
			}
		}
	}
	// Pass 2: attach simple (copy) and complex constraints to nodes.
	var stale nodeset
	for _, c := range constraints {
		var id nodeid
		switch c := c.(type) {
		case *addrConstraint:
			// base constraints handled in previous loop
			continue
		case *copyConstraint:
			// simple (copy) constraint
			id = c.src
			a.nodes[id].solve.copyTo.add(c.dst)
		default:
			// complex constraint
			id = c.ptr()
			solve := a.nodes[id].solve
			solve.complex = append(solve.complex, c)
		}
		// If the node the new constraint ranges over already has
		// labels, re-process it so the constraint observes them.
		if n := a.nodes[id]; !n.solve.pts.IsEmpty() {
			if !n.solve.prevPTS.IsEmpty() {
				stale.add(id)
			}
			a.addWork(id)
		}
	}
	// Apply new constraints to pre-existing PTS labels
	// (those already consumed from pts into prevPTS).
	var space [50]int // stack space for the common small case
	for _, id := range stale.AppendTo(space[:0]) {
		n := a.nodes[nodeid(id)]
		a.solveConstraints(n, &n.solve.prevPTS)
	}
}
// solveConstraints applies each resolution rule attached to node n to
// the set of labels delta. It may generate new constraints in
// a.constraints.
//
func (a *analysis) solveConstraints(n *node, delta *nodeset) {
	if delta.IsEmpty() {
		return
	}
	// Process complex constraints dependent on n.
	// (Each c.solve may append to a.constraints.)
	for _, c := range n.solve.complex {
		if a.log != nil {
			fmt.Fprintf(a.log, "\t\tconstraint %s\n", c)
		}
		c.solve(a, delta)
	}
	// Process copy constraints: propagate delta to every copy
	// successor, enqueueing those whose points-to set grows.
	var copySeen nodeset // suppresses reprocessing of duplicate edges
	for _, x := range n.solve.copyTo.AppendTo(a.deltaSpace) {
		mid := nodeid(x)
		if copySeen.add(mid) {
			if a.nodes[mid].solve.pts.addAll(delta) {
				a.addWork(mid)
			}
		}
	}
}
// addLabel adds label to the points-to set of ptr and reports whether
// the set grew, logging the addition when a log sink is configured.
func (a *analysis) addLabel(ptr, label nodeid) bool {
	grew := a.nodes[ptr].solve.pts.add(label)
	if !grew || a.log == nil {
		return grew
	}
	fmt.Fprintf(a.log, "\t\tpts(n%d) += n%d\n", ptr, label)
	return grew
}
// addWork places node id on the solver worklist.
func (a *analysis) addWork(id nodeid) {
	a.work.Insert(int(id))
	if a.log == nil {
		return
	}
	fmt.Fprintf(a.log, "\t\twork: n%d\n", id)
}
// onlineCopy adds a copy edge dst <- src. It is called online, i.e.
// during solving, so it adds the edge and propagates pts members
// directly rather than by instantiating a 'constraint'.
//
// The size of the copy is implicitly 1.
// It reports whether pts(dst) changed.
//
func (a *analysis) onlineCopy(dst, src nodeid) bool {
	if dst == src {
		return false // self-copy: nothing to do
	}
	nsrc := a.nodes[src]
	if !nsrc.solve.copyTo.add(dst) {
		return false // edge already present
	}
	if a.log != nil {
		fmt.Fprintf(a.log, "\t\t\tdynamic copy n%d <- n%d\n", dst, src)
	}
	// TODO(adonovan): most calls to onlineCopy
	// are followed by addWork, possibly batched
	// via a 'changed' flag; see if there's a
	// noticeable penalty to calling addWork here.
	return a.nodes[dst].solve.pts.addAll(&nsrc.solve.pts)
}
// onlineCopyN adds copy edges for sizeof consecutive node pairs,
// (dst, src), (dst+1, src+1), ..., and returns sizeof.
// Nodes whose points-to sets grow are implicitly added to the worklist.
//
// TODO(adonovan): now that we support a.copy() during solving, we
// could eliminate onlineCopyN, but it's much slower. Investigate.
//
func (a *analysis) onlineCopyN(dst, src nodeid, sizeof uint32) uint32 {
	for end := dst + nodeid(sizeof); dst < end; dst, src = dst+1, src+1 {
		if a.onlineCopy(dst, src) {
			a.addWork(dst)
		}
	}
	return sizeof
}
// solve implements the load resolution rule (dst = *ptr): for each
// new object k in delta, add a copy edge from the node at c.offset
// within k to dst; enqueue dst once if any edge changed its set.
func (c *loadConstraint) solve(a *analysis, delta *nodeset) {
	var changed bool
	for _, x := range delta.AppendTo(a.deltaSpace) {
		k := nodeid(x)
		koff := k + nodeid(c.offset)
		if a.onlineCopy(c.dst, koff) {
			changed = true
		}
	}
	if changed {
		a.addWork(c.dst)
	}
}
// solve implements the store resolution rule (*ptr = src): for each
// new object k in delta, add a copy edge from c.src to the node at
// c.offset within k, enqueueing that node if its set grew.
func (c *storeConstraint) solve(a *analysis, delta *nodeset) {
	for _, x := range delta.AppendTo(a.deltaSpace) {
		k := nodeid(x)
		koff := k + nodeid(c.offset)
		if a.onlineCopy(koff, c.src) {
			a.addWork(koff)
		}
	}
}
// solve implements the offset-address resolution rule: for each new
// object k in delta, add the node at c.offset within k directly to
// pts(dst) (no copy edge is needed; the address itself is the value).
func (c *offsetAddrConstraint) solve(a *analysis, delta *nodeset) {
	dst := a.nodes[c.dst]
	for _, x := range delta.AppendTo(a.deltaSpace) {
		k := nodeid(x)
		if dst.solve.pts.add(k + nodeid(c.offset)) {
			a.addWork(c.dst)
		}
	}
}
// solve implements the type-filter resolution rule: each tagged
// object in delta whose dynamic type is assignable to c.typ flows,
// still tagged, into pts(dst).
func (c *typeFilterConstraint) solve(a *analysis, delta *nodeset) {
	for _, x := range delta.AppendTo(a.deltaSpace) {
		ifaceObj := nodeid(x)
		tDyn, _, indirect := a.taggedValue(ifaceObj)
		if indirect {
			// TODO(adonovan): we'll need to implement this
			// when we start creating indirect tagged objects.
			panic("indirect tagged object")
		}
		if types.AssignableTo(tDyn, c.typ) {
			if a.addLabel(c.dst, ifaceObj) {
				a.addWork(c.dst)
			}
		}
	}
}
// solve implements the untag resolution rule: for each tagged object
// in delta whose dynamic type matches c.typ (by identity if c.exact,
// otherwise by assignability), copy its payload — without the type
// tag — into dst.
func (c *untagConstraint) solve(a *analysis, delta *nodeset) {
	predicate := types.AssignableTo
	if c.exact {
		predicate = types.Identical
	}
	for _, x := range delta.AppendTo(a.deltaSpace) {
		ifaceObj := nodeid(x)
		tDyn, v, indirect := a.taggedValue(ifaceObj)
		if indirect {
			// TODO(adonovan): we'll need to implement this
			// when we start creating indirect tagged objects.
			panic("indirect tagged object")
		}
		if predicate(tDyn, c.typ) {
			// Copy payload sans tag to dst.
			//
			// TODO(adonovan): opt: if tDyn is
			// nonpointerlike we can skip this entire
			// constraint, perhaps. We only care about
			// pointers among the fields.
			a.onlineCopyN(c.dst, v, a.sizeof(tDyn))
		}
	}
}
// solve implements the resolution rule for a dynamic method call
// through an interface: for each tagged object in delta, look up
// c.method on the object's dynamic type and wire the caller's
// parameter/result block (starting at c.params) to the concrete
// method's parameters and results.
func (c *invokeConstraint) solve(a *analysis, delta *nodeset) {
	for _, x := range delta.AppendTo(a.deltaSpace) {
		ifaceObj := nodeid(x)
		tDyn, v, indirect := a.taggedValue(ifaceObj)
		if indirect {
			// TODO(adonovan): we may need to implement this if
			// we ever apply invokeConstraints to reflect.Value PTSs,
			// e.g. for (reflect.Value).Call.
			panic("indirect tagged object")
		}
		// Look up the concrete method.
		fn := a.prog.LookupMethod(tDyn, c.method.Pkg(), c.method.Name())
		if fn == nil {
			panic(fmt.Sprintf("n%d: no ssa.Function for %s", c.iface, c.method))
		}
		sig := fn.Signature
		fnObj := a.globalobj[fn] // dynamic calls use shared contour
		if fnObj == 0 {
			// a.objectNode(fn) was not called during gen phase.
			panic(fmt.Sprintf("a.globalobj[%s]==nil", fn))
		}
		// Make callsite's fn variable point to identity of
		// concrete method. (There's no need to add it to
		// worklist since it never has attached constraints.)
		a.addLabel(c.params, fnObj)
		// Extract value and connect to method's receiver.
		// Copy payload to method's receiver param (arg0).
		arg0 := a.funcParams(fnObj)
		recvSize := a.sizeof(sig.Recv().Type())
		a.onlineCopyN(arg0, v, recvSize)
		src := c.params + 1 // skip past identity
		dst := arg0 + nodeid(recvSize)
		// Copy caller's argument block to method formal parameters.
		paramsSize := a.sizeof(sig.Params())
		a.onlineCopyN(dst, src, paramsSize)
		src += nodeid(paramsSize)
		dst += nodeid(paramsSize)
		// Copy method results to caller's result block.
		// NB: the copy direction is reversed relative to the
		// params copy above — after the advance, src is the
		// caller's result block and dst the method's results,
		// so results flow method -> caller.
		resultsSize := a.sizeof(sig.Results())
		a.onlineCopyN(src, dst, resultsSize)
	}
}
// solve panics: addrConstraint is a base (not complex) constraint;
// it is consumed by processNewConstraints and never solved online.
func (c *addrConstraint) solve(a *analysis, delta *nodeset) {
	panic("addr is not a complex constraint")
}
// solve panics: copyConstraint is a simple (not complex) constraint;
// it is represented by copyTo edges and never solved online.
func (c *copyConstraint) solve(a *analysis, delta *nodeset) {
	panic("copy is not a complex constraint")
}

111
vendor/golang.org/x/tools/go/pointer/stdlib_test.go generated vendored Normal file
View File

@@ -0,0 +1,111 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Incomplete source tree on Android.
// +build !android
package pointer
// This file runs the pointer analysis on all packages and tests beneath
// $GOROOT. It provides a "smoke test" that the analysis doesn't crash
// on a large input, and a benchmark for performance measurement.
//
// Because it is relatively slow, the --stdlib flag must be enabled for
// this test to run:
// % go test -v golang.org/x/tools/go/pointer --stdlib
import (
"flag"
"go/build"
"go/token"
"testing"
"time"
"golang.org/x/tools/go/buildutil"
"golang.org/x/tools/go/loader"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
)
var runStdlibTest = flag.Bool("stdlib", false, "Run the (slow) stdlib test")
// TestStdlib loads, builds, and runs the pointer analysis over every
// package and test under $GOROOT as a smoke test and rough benchmark.
// It is slow, so it only runs when the --stdlib flag is supplied.
func TestStdlib(t *testing.T) {
	if !*runStdlibTest {
		t.Skip("skipping (slow) stdlib test (use --stdlib)")
	}
	// Load, parse and type-check the program.
	ctxt := build.Default // copy
	ctxt.GOPATH = ""      // disable GOPATH
	conf := loader.Config{Build: &ctxt}
	if _, err := conf.FromArgs(buildutil.AllPackages(conf.Build), true); err != nil {
		t.Errorf("FromArgs failed: %v", err)
		return
	}
	iprog, err := conf.Load()
	if err != nil {
		t.Fatalf("Load failed: %v", err)
	}
	// Create SSA packages.
	prog := ssautil.CreateProgram(iprog, 0)
	prog.Build()
	// Sanity check that a plausible fraction of the stdlib loaded.
	numPkgs := len(prog.AllPackages())
	if want := 240; numPkgs < want {
		t.Errorf("Loaded only %d packages, want at least %d", numPkgs, want)
	}
	// Determine the set of packages/tests to analyze:
	// a synthetic test-main package per initial package, where available.
	var mains []*ssa.Package
	for _, info := range iprog.InitialPackages() {
		ssapkg := prog.Package(info.Pkg)
		if main := prog.CreateTestMainPackage(ssapkg); main != nil {
			mains = append(mains, main)
		}
	}
	if mains == nil {
		t.Fatal("no tests found in analysis scope")
	}
	// Run the analysis.
	config := &Config{
		Reflection:     false, // TODO(adonovan): fix remaining bug in rVCallConstraint, then enable.
		BuildCallGraph: true,
		Mains:          mains,
	}
	// TODO(adonovan): add some query values (affects track bits).
	t0 := time.Now()
	result, err := Analyze(config)
	if err != nil {
		t.Fatal(err) // internal error in pointer analysis
	}
	_ = result // TODO(adonovan): measure something
	t1 := time.Now()
	// Dump some statistics.
	allFuncs := ssautil.AllFunctions(prog)
	var numInstrs int
	for fn := range allFuncs {
		for _, b := range fn.Blocks {
			numInstrs += len(b.Instrs)
		}
	}
	// determine line count
	var lineCount int
	prog.Fset.Iterate(func(f *token.File) bool {
		lineCount += f.LineCount()
		return true
	})
	t.Log("#Source lines: ", lineCount)
	t.Log("#Instructions: ", numInstrs)
	t.Log("Pointer analysis: ", t1.Sub(t0))
}

View File

@@ -0,0 +1,42 @@
// +build ignore
package a
// This test exercises the synthesis of testmain packages for tests.
// The test framework doesn't directly let us perform negative
// assertions (i.e. that TestingQuux isn't called, or that its
// parameter's PTS is empty) so this test is rather roundabout.
import "testing"
func log(f func(*testing.T)) {
// The PTS of f is the set of called tests. TestingQuux is not present.
print(f) // @pointsto main.Test | main.TestFoo
}
func Test(t *testing.T) {
// Don't assert @pointsto(t) since its label contains a fragile line number.
log(Test)
}
func TestFoo(t *testing.T) {
// Don't assert @pointsto(t) since its label contains a fragile line number.
log(TestFoo)
}
func TestingQuux(t *testing.T) {
// We can't assert @pointsto(t) since this is dead code.
log(TestingQuux)
}
func BenchmarkFoo(b *testing.B) {
}
func ExampleBar() {
}
// Excludes TestingQuux.
// @calls testing.tRunner -> main.Test
// @calls testing.tRunner -> main.TestFoo
// @calls testing.runExample -> main.ExampleBar
// @calls (*testing.B).runN -> main.BenchmarkFoo

View File

@@ -0,0 +1,36 @@
// +build ignore
package main
var unknown bool
type S string
func incr(x int) int { return x + 1 }
func main() {
var i interface{}
i = 1
if unknown {
i = S("foo")
}
if unknown {
i = (func(int, int))(nil) // NB type compares equal to that below.
}
// Look, the test harness can handle equal-but-not-String-equal
// types because we parse types and using a typemap.
if unknown {
i = (func(x int, y int))(nil)
}
if unknown {
i = incr
}
print(i) // @types int | S | func(int, int) | func(int) int
// NB, an interface may never directly alias any global
// labels, even though it may contain pointers that do.
print(i) // @pointsto makeinterface:func(x int) int | makeinterface:func(x int, y int) | makeinterface:func(int, int) | makeinterface:int | makeinterface:main.S
print(i.(func(int) int)) // @pointsto main.incr
print() // regression test for crash
}

View File

@@ -0,0 +1,191 @@
// +build ignore
package main
// Test of arrays & slices with reflection.
import "reflect"
var a, b int
type S string
func reflectValueSlice() {
// reflect.Value contains a slice.
slice := make([]*int, 10) // @line slice
slice[0] = &a
rvsl := reflect.ValueOf(slice).Slice(0, 0)
print(rvsl.Interface()) // @types []*int
print(rvsl.Interface().([]*int)) // @pointsto makeslice@slice:15
print(rvsl.Interface().([]*int)[42]) // @pointsto main.a
// reflect.Value contains an array (non-addressable).
array := [10]*int{&a} // @line array
rvarray := reflect.ValueOf(array).Slice(0, 0)
print(rvarray.Interface()) // @types
print(rvarray.Interface().([]*int)) // @pointsto
print(rvarray.Interface().([]*int)[42]) // @pointsto
// reflect.Value contains a pointer-to-array
rvparray := reflect.ValueOf(&array).Slice(0, 0)
print(rvparray.Interface()) // @types []*int
print(rvparray.Interface().([]*int)) // @pointsto array@array:2
print(rvparray.Interface().([]*int)[42]) // @pointsto main.a
// reflect.Value contains a string.
rvstring := reflect.ValueOf("hi").Slice(0, 0)
print(rvstring.Interface()) // @types string
// reflect.Value contains a (named) string type.
rvS := reflect.ValueOf(S("hi")).Slice(0, 0)
print(rvS.Interface()) // @types S
// reflect.Value contains a non-array pointer.
rvptr := reflect.ValueOf(new(int)).Slice(0, 0)
print(rvptr.Interface()) // @types
// reflect.Value contains a non-string basic type.
rvint := reflect.ValueOf(3).Slice(0, 0)
print(rvint.Interface()) // @types
}
func reflectValueBytes() {
sl1 := make([]byte, 0) // @line ar5sl1
sl2 := make([]byte, 0) // @line ar5sl2
rvsl1 := reflect.ValueOf(sl1)
print(rvsl1.Interface()) // @types []byte
print(rvsl1.Interface().([]byte)) // @pointsto makeslice@ar5sl1:13
print(rvsl1.Bytes()) // @pointsto makeslice@ar5sl1:13
rvsl2 := reflect.ValueOf(123)
rvsl2.SetBytes(sl2)
print(rvsl2.Interface()) // @types int
print(rvsl2.Interface().([]byte)) // @pointsto
print(rvsl2.Bytes()) // @pointsto
rvsl3 := reflect.ValueOf([]byte(nil))
rvsl3.SetBytes(sl2)
print(rvsl3.Interface()) // @types []byte
print(rvsl3.Interface().([]byte)) // @pointsto makeslice@ar5sl2:13
print(rvsl3.Bytes()) // @pointsto makeslice@ar5sl2:13
}
func reflectValueIndex() {
slice := []*int{&a} // @line ar6slice
rv1 := reflect.ValueOf(slice)
print(rv1.Index(42).Interface()) // @types *int
print(rv1.Index(42).Interface().(*int)) // @pointsto main.a
array := [10]*int{&a}
rv2 := reflect.ValueOf(array)
print(rv2.Index(42).Interface()) // @types *int
print(rv2.Index(42).Interface().(*int)) // @pointsto main.a
rv3 := reflect.ValueOf("string")
print(rv3.Index(42).Interface()) // @types rune
rv4 := reflect.ValueOf(&array)
print(rv4.Index(42).Interface()) // @types
rv5 := reflect.ValueOf(3)
print(rv5.Index(42).Interface()) // @types
}
func reflectValueElem() {
// Interface.
var iface interface{} = &a
rv1 := reflect.ValueOf(&iface).Elem()
print(rv1.Interface()) // @types *int
print(rv1.Interface().(*int)) // @pointsto main.a
print(rv1.Elem().Interface()) // @types *int
print(rv1.Elem().Interface().(*int)) // @pointsto main.a
print(reflect.ValueOf(new(interface{})).Elem().Elem()) // @types
// Pointer.
ptr := &a
rv2 := reflect.ValueOf(&ptr)
print(rv2.Elem().Interface()) // @types *int
print(rv2.Elem().Interface().(*int)) // @pointsto main.a
// No other type works with (rV).Elem, not even those that
// work with (rT).Elem: slice, array, map, chan.
rv3 := reflect.ValueOf([]*int{&a})
print(rv3.Elem().Interface()) // @types
rv4 := reflect.ValueOf([10]*int{&a})
print(rv4.Elem().Interface()) // @types
rv5 := reflect.ValueOf(map[*int]*int{&a: &b})
print(rv5.Elem().Interface()) // @types
ch := make(chan *int)
ch <- &a
rv6 := reflect.ValueOf(ch)
print(rv6.Elem().Interface()) // @types
rv7 := reflect.ValueOf(3)
print(rv7.Elem().Interface()) // @types
}
func reflectTypeElem() {
rt1 := reflect.TypeOf(make([]*int, 0))
print(reflect.Zero(rt1.Elem())) // @types *int
rt2 := reflect.TypeOf([10]*int{})
print(reflect.Zero(rt2.Elem())) // @types *int
rt3 := reflect.TypeOf(map[*int]*int{})
print(reflect.Zero(rt3.Elem())) // @types *int
rt4 := reflect.TypeOf(make(chan *int))
print(reflect.Zero(rt4.Elem())) // @types *int
ptr := &a
rt5 := reflect.TypeOf(&ptr)
print(reflect.Zero(rt5.Elem())) // @types *int
rt6 := reflect.TypeOf(3)
print(reflect.Zero(rt6.Elem())) // @types
}
func reflectPtrTo() {
tInt := reflect.TypeOf(3)
tPtrInt := reflect.PtrTo(tInt)
print(reflect.Zero(tPtrInt)) // @types *int
tPtrPtrInt := reflect.PtrTo(tPtrInt)
print(reflect.Zero(tPtrPtrInt)) // @types **int
}
func reflectSliceOf() {
tInt := reflect.TypeOf(3)
tSliceInt := reflect.SliceOf(tInt)
print(reflect.Zero(tSliceInt)) // @types []int
}
type T struct{ x int }
func reflectMakeSlice() {
rt := []reflect.Type{
reflect.TypeOf(3),
reflect.TypeOf([]int{}),
reflect.TypeOf([]T{}),
}[0]
sl := reflect.MakeSlice(rt, 0, 0)
print(sl) // @types []int | []T
print(sl) // @pointsto <alloc in reflect.MakeSlice> | <alloc in reflect.MakeSlice>
print(&sl.Interface().([]T)[0].x) // @pointsto <alloc in reflect.MakeSlice>[*].x
}
func main() {
reflectValueSlice()
reflectValueBytes()
reflectValueIndex()
reflectValueElem()
reflectTypeElem()
reflectPtrTo()
reflectSliceOf()
reflectMakeSlice()
}

View File

@@ -0,0 +1,97 @@
// +build ignore
package main
var unknown bool // defeat dead-code elimination
var a, b int
func array1() {
sliceA := make([]*int, 10) // @line a1make
sliceA[0] = &a
var sliceB []*int
sliceB = append(sliceB, &b) // @line a1append
print(sliceA) // @pointsto makeslice@a1make:16
print(sliceA[0]) // @pointsto main.a
print(sliceB) // @pointsto append@a1append:17
print(sliceB[100]) // @pointsto main.b
}
func array2() {
sliceA := make([]*int, 10) // @line a2make
sliceA[0] = &a
sliceB := sliceA[:]
print(sliceA) // @pointsto makeslice@a2make:16
print(sliceA[0]) // @pointsto main.a
print(sliceB) // @pointsto makeslice@a2make:16
print(sliceB[0]) // @pointsto main.a
}
func array3() {
a := []interface{}{"", 1}
b := []interface{}{true, func() {}}
print(a[0]) // @types string | int
print(b[0]) // @types bool | func()
}
// Test of append, copy, slice.
func array4() {
var s2 struct { // @line a4L0
a [3]int
b struct{ c, d int }
}
var sl1 = make([]*int, 10) // @line a4make
var someint int // @line a4L1
sl1[1] = &someint
sl2 := append(sl1, &s2.a[1]) // @line a4append1
print(sl1) // @pointsto makeslice@a4make:16
print(sl2) // @pointsto append@a4append1:15 | makeslice@a4make:16
print(sl1[0]) // @pointsto someint@a4L1:6 | s2.a[*]@a4L0:6
print(sl2[0]) // @pointsto someint@a4L1:6 | s2.a[*]@a4L0:6
// In z=append(x,y) we should observe flow from y[*] to x[*].
var sl3 = make([]*int, 10) // @line a4L2
_ = append(sl3, &s2.a[1])
print(sl3) // @pointsto makeslice@a4L2:16
print(sl3[0]) // @pointsto s2.a[*]@a4L0:6
var sl4 = []*int{&a} // @line a4L3
sl4a := append(sl4) // @line a4L4
print(sl4a) // @pointsto slicelit@a4L3:18 | append@a4L4:16
print(&sl4a[0]) // @pointsto slicelit[*]@a4L3:18 | append[*]@a4L4:16
print(sl4a[0]) // @pointsto main.a
var sl5 = []*int{&b} // @line a4L5
copy(sl5, sl4)
print(sl5) // @pointsto slicelit@a4L5:18
print(&sl5[0]) // @pointsto slicelit[*]@a4L5:18
print(sl5[0]) // @pointsto main.b | main.a
var sl6 = sl5[:0]
print(sl6) // @pointsto slicelit@a4L5:18
print(&sl6[0]) // @pointsto slicelit[*]@a4L5:18
print(sl6[0]) // @pointsto main.b | main.a
}
func array5() {
var arr [2]*int
arr[0] = &a
arr[1] = &b
var n int
print(arr[n]) // @pointsto main.a | main.b
}
func main() {
array1()
array2()
array3()
array4()
array5()
}

View File

@@ -0,0 +1,118 @@
// +build ignore
package main
func incr(x int) int { return x + 1 }
func decr(x int) int { return x - 1 }
var unknown bool // defeat dead-code elimination
func chan1() {
chA := make(chan func(int) int, 0) // @line c1makeA
chB := make(chan func(int) int, 0) // @line c1makeB
chA <- incr
chB <- decr
chB <- func(int) int { return 1 }
print(chA) // @pointsto makechan@c1makeA:13
print(<-chA) // @pointsto main.incr
print(chB) // @pointsto makechan@c1makeB:13
print(<-chB) // @pointsto main.decr | main.chan1$1
}
func chan2() {
chA := make(chan func(int) int, 0) // @line c2makeA
chB := make(chan func(int) int, 0) // @line c2makeB
chA <- incr
chB <- decr
chB <- func(int) int { return 1 }
// Channels flow together.
// Labelsets remain distinct but elements are merged.
chAB := chA
if unknown {
chAB = chB
}
print(chA) // @pointsto makechan@c2makeA:13
print(<-chA) // @pointsto main.incr
print(chB) // @pointsto makechan@c2makeB:13
print(<-chB) // @pointsto main.decr | main.chan2$1
print(chAB) // @pointsto makechan@c2makeA:13 | makechan@c2makeB:13
print(<-chAB) // @pointsto main.incr | main.decr | main.chan2$1
(<-chA)(3)
}
// @calls main.chan2 -> main.incr
func chan3() {
chA := make(chan func(int) int, 0) // @line c3makeA
chB := make(chan func(int) int, 0) // @line c3makeB
chA <- incr
chB <- decr
chB <- func(int) int { return 1 }
print(chA) // @pointsto makechan@c3makeA:13
print(<-chA) // @pointsto main.incr
print(chB) // @pointsto makechan@c3makeB:13
print(<-chB) // @pointsto main.decr | main.chan3$1
(<-chA)(3)
}
// @calls main.chan3 -> main.incr
func chan4() {
chA := make(chan func(int) int, 0) // @line c4makeA
chB := make(chan func(int) int, 0) // @line c4makeB
select {
case chA <- incr:
case chB <- decr:
case a := <-chA:
print(a) // @pointsto main.incr
case b := <-chB:
print(b) // @pointsto main.decr
default:
print(chA) // @pointsto makechan@c4makeA:13
print(chB) // @pointsto makechan@c4makeB:13
}
for k := range chA {
print(k) // @pointsto main.incr
}
// Exercise constraint generation (regtest for a crash).
for range chA {
}
}
// Multi-word channel value in select with multiple receive cases.
// (Regtest for a crash.)
func chan5() {
type T struct {
x *int
y interface{}
}
ch := make(chan T)
ch <- T{new(int), incr} // @line ch5new
select {
case a := <-ch:
print(a.x) // @pointsto new@ch5new:13
print(a.y) // @types func(x int) int
case b := <-ch:
print(b.x) // @pointsto new@ch5new:13
print(b.y) // @types func(x int) int
}
}
func main() {
chan1()
chan2()
chan3()
chan4()
chan5()
}

View File

@@ -0,0 +1,85 @@
// +build ignore
package main
import "reflect"
// Test of channels with reflection.
var a, b int
func chanreflect1() {
ch := make(chan *int, 0) // @line cr1make
crv := reflect.ValueOf(ch)
crv.Send(reflect.ValueOf(&a))
print(crv.Interface()) // @types chan *int
print(crv.Interface().(chan *int)) // @pointsto makechan@cr1make:12
print(<-ch) // @pointsto main.a
}
func chanreflect1i() {
// Exercises reflect.Value conversions to/from interfaces:
// a different code path than for concrete types.
ch := make(chan interface{}, 0)
reflect.ValueOf(ch).Send(reflect.ValueOf(&a))
v := <-ch
print(v) // @types *int
print(v.(*int)) // @pointsto main.a
}
func chanreflect2() {
ch := make(chan *int, 0)
ch <- &b
crv := reflect.ValueOf(ch)
r, _ := crv.Recv()
print(r.Interface()) // @types *int
print(r.Interface().(*int)) // @pointsto main.b
}
func chanOfRecv() {
// MakeChan(<-chan) is a no-op.
t := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(&a))
print(reflect.Zero(t).Interface()) // @types <-chan *int
print(reflect.MakeChan(t, 0).Interface().(<-chan *int)) // @pointsto
print(reflect.MakeChan(t, 0).Interface().(chan *int)) // @pointsto
}
func chanOfSend() {
// MakeChan(chan<-) is a no-op.
t := reflect.ChanOf(reflect.SendDir, reflect.TypeOf(&a))
print(reflect.Zero(t).Interface()) // @types chan<- *int
print(reflect.MakeChan(t, 0).Interface().(chan<- *int)) // @pointsto
print(reflect.MakeChan(t, 0).Interface().(chan *int)) // @pointsto
}
func chanOfBoth() {
t := reflect.ChanOf(reflect.BothDir, reflect.TypeOf(&a))
print(reflect.Zero(t).Interface()) // @types chan *int
ch := reflect.MakeChan(t, 0)
print(ch.Interface().(chan *int)) // @pointsto <alloc in reflect.MakeChan>
ch.Send(reflect.ValueOf(&b))
ch.Interface().(chan *int) <- &a
r, _ := ch.Recv()
print(r.Interface().(*int)) // @pointsto main.a | main.b
print(<-ch.Interface().(chan *int)) // @pointsto main.a | main.b
}
var unknownDir reflect.ChanDir // not a constant
func chanOfUnknown() {
// Unknown channel direction: assume all three.
// MakeChan only works on the bi-di channel type.
t := reflect.ChanOf(unknownDir, reflect.TypeOf(&a))
print(reflect.Zero(t).Interface()) // @types <-chan *int | chan<- *int | chan *int
print(reflect.MakeChan(t, 0).Interface()) // @types chan *int
}
func main() {
chanreflect1()
chanreflect1i()
chanreflect2()
chanOfRecv()
chanOfSend()
chanOfBoth()
chanOfUnknown()
}

View File

@@ -0,0 +1,35 @@
// +build ignore
package main
import "reflect"
//
// This test is very sensitive to line-number perturbations!
// Test of channels with reflection.
var a, b int
func chanreflect1() {
ch := make(chan *int, 0)
crv := reflect.ValueOf(ch)
crv.Send(reflect.ValueOf(&a))
print(crv.Interface()) // @types chan *int
print(crv.Interface().(chan *int)) // @pointsto makechan@testdata/chanreflect.go:15:12
print(<-ch) // @pointsto main.a
}
func chanreflect2() {
ch := make(chan *int, 0)
ch <- &b
crv := reflect.ValueOf(ch)
r, _ := crv.Recv()
print(r.Interface()) // @types *int
print(r.Interface().(*int)) // @pointsto main.b
}
func main() {
chanreflect1()
chanreflect2()
}

View File

@@ -0,0 +1,48 @@
// +build ignore
package main
// Test of context-sensitive treatment of certain function calls,
// e.g. static calls to simple accessor methods.
var a, b int
type T struct{ x *int }
func (t *T) SetX(x *int) { t.x = x }
func (t *T) GetX() *int { return t.x }
func context1() {
var t1, t2 T
t1.SetX(&a)
t2.SetX(&b)
print(t1.GetX()) // @pointsto main.a
print(t2.GetX()) // @pointsto main.b
}
func context2() {
id := func(x *int) *int {
print(x) // @pointsto main.a | main.b
return x
}
print(id(&a)) // @pointsto main.a
print(id(&b)) // @pointsto main.b
// Same again, but anon func has free vars.
var c int // @line context2c
id2 := func(x *int) (*int, *int) {
print(x) // @pointsto main.a | main.b
return x, &c
}
p, q := id2(&a)
print(p) // @pointsto main.a
print(q) // @pointsto c@context2c:6
r, s := id2(&b)
print(r) // @pointsto main.b
print(s) // @pointsto c@context2c:6
}
func main() {
context1()
context2()
}

63
vendor/golang.org/x/tools/go/pointer/testdata/conv.go generated vendored Normal file
View File

@@ -0,0 +1,63 @@
// +build ignore
package main
import "unsafe"
var a int
func conv1() {
// Conversions of channel direction.
ch := make(chan int) // @line c1make
print((<-chan int)(ch)) // @pointsto makechan@c1make:12
print((chan<- int)(ch)) // @pointsto makechan@c1make:12
}
func conv2() {
// string -> []byte/[]rune conversion
s := "foo"
ba := []byte(s) // @line c2ba
ra := []rune(s) // @line c2ra
print(ba) // @pointsto convert@c2ba:14
print(ra) // @pointsto convert@c2ra:14
}
func conv3() {
// Conversion of same underlying types.
type PI *int
pi := PI(&a)
print(pi) // @pointsto main.a
pint := (*int)(pi)
print(pint) // @pointsto main.a
// Conversions between pointers to identical base types.
var y *PI = &pi
var x **int = (**int)(y)
print(*x) // @pointsto main.a
print(*y) // @pointsto main.a
y = (*PI)(x)
print(*y) // @pointsto main.a
}
func conv4() {
// Handling of unsafe.Pointer conversion is unsound:
// we lose the alias to main.a and get something like new(int) instead.
p := (*int)(unsafe.Pointer(&a)) // @line c2p
print(p) // @pointsto convert@c2p:13
}
// Regression test for b/8231.
func conv5() {
type P unsafe.Pointer
var i *struct{}
_ = P(i)
}
func main() {
conv1()
conv2()
conv3()
conv4()
conv5()
}

View File

@@ -0,0 +1,21 @@
// +build ignore
package main
var a int
type t struct {
a *map[string]chan *int
}
func fn() []t {
m := make(map[string]chan *int)
m[""] = make(chan *int, 1)
m[""] <- &a
return []t{t{a: &m}}
}
func main() {
x := fn()
print(x) // @pointstoquery <-(*x[i].a)[key] main.a
}

View File

@@ -0,0 +1,89 @@
package main
import "runtime"
func final1a(x *int) int {
print(x) // @pointsto new@newint:10
return *x
}
func final1b(x *bool) {
print(x) // @pointsto
}
func runtimeSetFinalizer1() {
x := new(int) // @line newint
runtime.SetFinalizer(x, final1a) // ok: final1a's result is ignored
runtime.SetFinalizer(x, final1b) // param type mismatch: no effect
}
// @calls main.runtimeSetFinalizer1 -> main.final1a
// @calls main.runtimeSetFinalizer1 -> main.final1b
func final2a(x *bool) {
print(x) // @pointsto new@newbool1:10 | new@newbool2:10
}
func final2b(x *bool) {
print(x) // @pointsto new@newbool1:10 | new@newbool2:10
}
func runtimeSetFinalizer2() {
x := new(bool) // @line newbool1
f := final2a
if unknown {
x = new(bool) // @line newbool2
f = final2b
}
runtime.SetFinalizer(x, f)
}
// @calls main.runtimeSetFinalizer2 -> main.final2a
// @calls main.runtimeSetFinalizer2 -> main.final2b
type T int
func (t *T) finalize() {
print(t) // @pointsto new@final3:10
}
func runtimeSetFinalizer3() {
x := new(T) // @line final3
runtime.SetFinalizer(x, (*T).finalize)
}
// @calls main.runtimeSetFinalizer3 -> (*main.T).finalize$thunk
// I hope I never live to see this code in the wild.
var setFinalizer = runtime.SetFinalizer
func final4(x *int) {
print(x) // @pointsto new@finalIndirect:10
}
func runtimeSetFinalizerIndirect() {
// In an indirect call, the shared contour for SetFinalizer is
// used, i.e. the call is not inlined and appears in the call graph.
x := new(int) // @line finalIndirect
setFinalizer(x, final4)
}
// Exercise the elimination of SetFinalizer
// constraints with non-pointer operands.
func runtimeSetFinalizerNonpointer() {
runtime.SetFinalizer(nil, (*T).finalize) // x is a non-pointer
runtime.SetFinalizer((*T).finalize, nil) // f is a non-pointer
}
// @calls main.runtimeSetFinalizerIndirect -> runtime.SetFinalizer
// @calls runtime.SetFinalizer -> main.final4
func main() {
runtimeSetFinalizer1()
runtimeSetFinalizer2()
runtimeSetFinalizer3()
runtimeSetFinalizerIndirect()
runtimeSetFinalizerNonpointer()
}
var unknown bool // defeat dead-code elimination

63
vendor/golang.org/x/tools/go/pointer/testdata/flow.go generated vendored Normal file
View File

@@ -0,0 +1,63 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@calls comments are expected analyzer output keyed to
// source positions -- do not edit or reformat the code lines.
// Demonstration of directionality of flow edges.
func f1() {}
func f2() {}
var somepred bool
// Tracking functions.
func flow1() {
s := f1
p := f2
q := p
r := q
if somepred {
r = s
}
print(s) // @pointsto main.f1
print(p) // @pointsto main.f2
print(q) // @pointsto main.f2
print(r) // @pointsto main.f1 | main.f2
}
// Tracking concrete types in interfaces.
func flow2() {
var s interface{} = 1
var p interface{} = "foo"
q := p
r := q
if somepred {
r = s
}
print(s) // @types int
print(p) // @types string
print(q) // @types string
print(r) // @types int | string
}
var g1, g2 int
// Tracking addresses of globals.
func flow3() {
s := &g1
p := &g2
q := p
r := q
if somepred {
r = s
}
print(s) // @pointsto main.g1
print(p) // @pointsto main.g2
print(q) // @pointsto main.g2
print(r) // @pointsto main.g2 | main.g1
}
func main() {
flow1()
flow2()
flow3()
}

View File

@@ -0,0 +1,42 @@
// +build ignore
// This is a slice of the fmt package.
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@calls comments are expected analyzer output keyed to
// source positions -- do not edit or reformat the code lines.
type pp struct {
field interface{}
}
func newPrinter() *pp {
return new(pp)
}
func Fprintln(a ...interface{}) {
p := newPrinter()
p.doPrint(a, true, true)
}
func Println(a ...interface{}) {
Fprintln(a...)
}
func (p *pp) doPrint(a []interface{}, addspace, addnewline bool) {
print(a[0]) // @types S | string
stringer := a[0].(interface {
String() string
})
stringer.String()
print(stringer) // @types S
}
type S int
func (S) String() string { return "" }
func main() {
Println("Hello, World!", S(0))
}
// @calls (*main.pp).doPrint -> (main.S).String

205
vendor/golang.org/x/tools/go/pointer/testdata/func.go generated vendored Normal file
View File

@@ -0,0 +1,205 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@calls comments are expected analyzer output keyed to
// source positions -- do not edit or reformat the code lines.
var a, b, c int
var unknown bool // defeat dead-code elimination
func func1() {
var h int // @line f1h
f := func(x *int) *int {
if unknown {
return &b
}
return x
}
// FV(g) = {f, h}
g := func(x *int) *int {
if unknown {
return &h
}
return f(x)
}
print(g(&a)) // @pointsto main.a | main.b | h@f1h:6
print(f(&a)) // @pointsto main.a | main.b
print(&a) // @pointsto main.a
}
// @calls main.func1 -> main.func1$2
// @calls main.func1 -> main.func1$1
// @calls main.func1$2 -> main.func1$1
func func2() {
var x, y *int
defer func() {
x = &a
}()
go func() {
y = &b
}()
print(x) // @pointsto main.a
print(y) // @pointsto main.b
}
func func3() {
x, y := func() (x, y *int) {
x = &a
y = &b
if unknown {
return nil, &c
}
return
}()
print(x) // @pointsto main.a
print(y) // @pointsto main.b | main.c
}
func swap(x, y *int) (*int, *int) { // @line swap
print(&x) // @pointsto x@swap:11
print(x) // @pointsto makeslice[*]@func4make:11
print(&y) // @pointsto y@swap:14
print(y) // @pointsto j@f4j:5
return y, x
}
func func4() {
a := make([]int, 10) // @line func4make
i, j := 123, 456 // @line f4j
_ = i
p, q := swap(&a[3], &j)
print(p) // @pointsto j@f4j:5
print(q) // @pointsto makeslice[*]@func4make:11
f := &b
print(f) // @pointsto main.b
}
type T int
func (t *T) f(x *int) *int {
print(t) // @pointsto main.a
print(x) // @pointsto main.c
return &b
}
func (t *T) g(x *int) *int {
print(t) // @pointsto main.a
print(x) // @pointsto main.b
return &c
}
func (t *T) h(x *int) *int {
print(t) // @pointsto main.a
print(x) // @pointsto main.b
return &c
}
var h func(*T, *int) *int
func func5() {
// Static call of method.
t := (*T)(&a)
print(t.f(&c)) // @pointsto main.b
// Static call of method as function
print((*T).g(t, &b)) // @pointsto main.c
// Dynamic call (not invoke) of method.
h = (*T).h
print(h(t, &b)) // @pointsto main.c
}
// @calls main.func5 -> (*main.T).f
// @calls main.func5 -> (*main.T).g$thunk
// @calls main.func5 -> (*main.T).h$thunk
func func6() {
A := &a
f := func() *int {
return A // (free variable)
}
print(f()) // @pointsto main.a
}
// @calls main.func6 -> main.func6$1
type I interface {
f()
}
type D struct{}
func (D) f() {}
func func7() {
var i I = D{}
imethodClosure := i.f
imethodClosure()
// @calls main.func7 -> (main.I).f$bound
// @calls (main.I).f$bound -> (main.D).f
var d D
cmethodClosure := d.f
cmethodClosure()
// @calls main.func7 -> (main.D).f$bound
// @calls (main.D).f$bound ->(main.D).f
methodExpr := D.f
methodExpr(d)
// @calls main.func7 -> (main.D).f$thunk
}
func func8(x ...int) {
print(&x[0]) // @pointsto varargs[*]@varargs:15
}
type E struct {
x1, x2, x3, x4, x5 *int
}
func (e E) f() {}
func func9() {
// Regression test for bug reported by Jon Valdes on golang-dev, Jun 19 2014.
// The receiver of a bound method closure may be of a multi-node type, E.
// valueNode was reserving only a single node for it, so the
// nodes used by the immediately following constraints
// (e.g. param 'i') would get clobbered.
var e E
e.x1 = &a
e.x2 = &a
e.x3 = &a
e.x4 = &a
e.x5 = &a
_ = e.f // form a closure---must reserve sizeof(E) nodes
func(i I) {
i.f() // must not crash the solver
}(new(D))
print(e.x1) // @pointsto main.a
print(e.x2) // @pointsto main.a
print(e.x3) // @pointsto main.a
print(e.x4) // @pointsto main.a
print(e.x5) // @pointsto main.a
}
func main() {
func1()
func2()
func3()
func4()
func5()
func6()
func7()
func8(1, 2, 3) // @line varargs
func9()
}
// @calls <root> -> main.main
// @calls <root> -> main.init

View File

@@ -0,0 +1,130 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@calls comments are expected analyzer output keyed to
// source positions -- do not edit or reformat the code lines.
import "reflect"
var zero, a, b int
var false2 bool
func f(p *int, q hasF) *int {
print(p) // @pointsto main.a
print(q) // @types *T
print(q.(*T)) // @pointsto new@newT1:22
return &b
}
func g(p *bool) (*int, *bool, hasF) {
return &b, p, new(T) // @line newT2
}
func reflectValueCall() {
rvf := reflect.ValueOf(f)
res := rvf.Call([]reflect.Value{
// argument order is not significant:
reflect.ValueOf(new(T)), // @line newT1
reflect.ValueOf(&a),
})
print(res[0].Interface()) // @types *int
print(res[0].Interface().(*int)) // @pointsto main.b
}
// @calls main.reflectValueCall -> main.f
func reflectValueCallIndirect() {
rvf := reflect.ValueOf(g)
call := rvf.Call // kids, don't try this at home
// Indirect call uses shared contour.
//
// Also notice that argument position doesn't matter, and args
// of inappropriate type (e.g. 'a') are ignored.
res := call([]reflect.Value{
reflect.ValueOf(&a),
reflect.ValueOf(&false2),
})
res0 := res[0].Interface()
print(res0) // @types *int | *bool | *T
print(res0.(*int)) // @pointsto main.b
print(res0.(*bool)) // @pointsto main.false2
print(res0.(hasF)) // @types *T
print(res0.(*T)) // @pointsto new@newT2:19
}
// @calls main.reflectValueCallIndirect -> (reflect.Value).Call$bound
// @calls (reflect.Value).Call$bound -> main.g
func reflectTypeInOut() {
var f func(float64, bool) (string, int)
print(reflect.Zero(reflect.TypeOf(f).In(0)).Interface()) // @types float64
print(reflect.Zero(reflect.TypeOf(f).In(1)).Interface()) // @types bool
print(reflect.Zero(reflect.TypeOf(f).In(-1)).Interface()) // @types float64 | bool
print(reflect.Zero(reflect.TypeOf(f).In(zero)).Interface()) // @types float64 | bool
print(reflect.Zero(reflect.TypeOf(f).Out(0)).Interface()) // @types string
print(reflect.Zero(reflect.TypeOf(f).Out(1)).Interface()) // @types int
print(reflect.Zero(reflect.TypeOf(f).Out(2)).Interface()) // @types
print(reflect.Zero(reflect.TypeOf(3).Out(0)).Interface()) // @types
}
type hasF interface {
F()
}
type T struct{}
func (T) F() {}
func (T) g(int) {}
type U struct{}
func (U) F(int) {}
func (U) g(string) {}
type I interface {
f()
}
var nonconst string
func reflectTypeMethodByName() {
TU := reflect.TypeOf([]interface{}{T{}, U{}}[0])
print(reflect.Zero(TU)) // @types T | U
F, _ := TU.MethodByName("F")
print(reflect.Zero(F.Type)) // @types func(T) | func(U, int)
print(F.Func) // @pointsto (main.T).F | (main.U).F
g, _ := TU.MethodByName("g")
print(reflect.Zero(g.Type)) // @types func(T, int) | func(U, string)
print(g.Func) // @pointsto (main.T).g | (main.U).g
// Non-literal method names are treated less precisely.
U := reflect.TypeOf(U{})
X, _ := U.MethodByName(nonconst)
print(reflect.Zero(X.Type)) // @types func(U, int) | func(U, string)
print(X.Func) // @pointsto (main.U).F | (main.U).g
// Interface methods.
rThasF := reflect.TypeOf(new(hasF)).Elem()
print(reflect.Zero(rThasF)) // @types hasF
F2, _ := rThasF.MethodByName("F")
print(reflect.Zero(F2.Type)) // @types func()
print(F2.Func) // @pointsto
}
func reflectTypeMethod() {
m := reflect.TypeOf(T{}).Method(0)
print(reflect.Zero(m.Type)) // @types func(T) | func(T, int)
print(m.Func) // @pointsto (main.T).F | (main.T).g
}
func main() {
reflectValueCall()
reflectValueCallIndirect()
reflectTypeInOut()
reflectTypeMethodByName()
reflectTypeMethod()
}

27
vendor/golang.org/x/tools/go/pointer/testdata/hello.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@calls comments are expected analyzer output keyed to
// source positions -- do not edit or reformat the code lines.
import (
"fmt"
"os"
)
type S int
var theS S
func (s *S) String() string {
print(s) // @pointsto main.theS
return ""
}
func main() {
// os.Args is considered intrinsically allocated,
// but may also be set explicitly (e.g. on Windows), hence '...'.
print(os.Args) // @pointsto <command-line args> | ...
fmt.Println("Hello, World!", &theS)
}
// @calls main.main -> fmt.Println
// @calls (*fmt.pp).handleMethods -> (*main.S).String

View File

@@ -0,0 +1,152 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@calls comments are expected analyzer output keyed to
// source positions -- do not edit or reformat the code lines.
type I interface {
f()
}
type C int
func (*C) f() {}
type D struct{ ptr *int }
func (D) f() {}
type E struct{}
func (*E) f() {}
var a, b int
var unknown bool // defeat dead-code elimination
func interface1() {
var i interface{} = &a
var j interface{} = D{&b}
k := j
if unknown {
k = i
}
print(i) // @types *int
print(j) // @types D
print(k) // @types *int | D
print(i.(*int)) // @pointsto main.a
print(j.(*int)) // @pointsto
print(k.(*int)) // @pointsto main.a
print(i.(D).ptr) // @pointsto
print(j.(D).ptr) // @pointsto main.b
print(k.(D).ptr) // @pointsto main.b
}
func interface2() {
var i I = (*C)(&a)
var j I = D{&a}
k := j
if unknown {
k = i
}
print(i) // @types *C
print(j) // @types D
print(k) // @types *C | D
print(k) // @pointsto makeinterface:main.D | makeinterface:*main.C
k.f()
// @calls main.interface2 -> (*main.C).f
// @calls main.interface2 -> (main.D).f
print(i.(*C)) // @pointsto main.a
print(j.(D).ptr) // @pointsto main.a
print(k.(*C)) // @pointsto main.a
switch x := k.(type) {
case *C:
print(x) // @pointsto main.a
case D:
print(x.ptr) // @pointsto main.a
case *E:
print(x) // @pointsto
}
}
func interface3() {
// There should be no backflow of concrete types from the type-switch to x.
var x interface{} = 0
print(x) // @types int
switch x.(type) {
case int:
case string:
}
}
func interface4() {
var i interface{} = D{&a}
if unknown {
i = 123
}
print(i) // @types int | D
j := i.(I) // interface narrowing type-assertion
print(j) // @types D
print(j.(D).ptr) // @pointsto main.a
var l interface{} = j // interface widening assignment.
print(l) // @types D
print(l.(D).ptr) // @pointsto main.a
m := j.(interface{}) // interface widening type-assertion.
print(m) // @types D
print(m.(D).ptr) // @pointsto main.a
}
// Interface method calls and value flow:
type J interface {
f(*int) *int
}
type P struct {
x int
}
func (p *P) f(pi *int) *int {
print(p) // @pointsto p@i5p:6
print(pi) // @pointsto i@i5i:6
return &p.x
}
func interface5() {
var p P // @line i5p
var j J = &p
var i int // @line i5i
print(j.f(&i)) // @pointsto p.x@i5p:6
print(&i) // @pointsto i@i5i:6
print(j) // @pointsto makeinterface:*main.P
}
// @calls main.interface5 -> (*main.P).f
func interface6() {
f := I.f
print(f) // @pointsto (main.I).f$thunk
f(new(struct{ D }))
}
// @calls main.interface6 -> (main.I).f$thunk
// @calls (main.I).f$thunk -> (*struct{main.D}).f
func main() {
interface1()
interface2()
interface3()
interface4()
interface5()
interface6()
}

View File

@@ -0,0 +1,17 @@
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// regression scenario depends on the exact shape of this program --
// do not edit or reformat the code lines.
func main() {
// Regression test for golang issue 9002.
//
// The two-result "value,ok" receive operation generated a
// too-wide constraint loading (value int, ok bool), not bool,
// from the channel.
//
// This bug manifested itself in an out-of-bounds array access
// when the makechan object was the highest-numbered node, as in
// this program.
//
// In more realistic programs it silently resulted in bogus
// constraints.
_, _ = <-make(chan int)
}

View File

@@ -0,0 +1,117 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@calls comments are expected analyzer output keyed to
// source positions -- do not edit or reformat the code lines.
// Test of maps with reflection.
import "reflect"
var a int
var b bool
func reflectMapKeysIndex() {
m := make(map[*int]*bool) // @line mr1make
m[&a] = &b
mrv := reflect.ValueOf(m)
print(mrv.Interface()) // @types map[*int]*bool
print(mrv.Interface().(map[*int]*bool)) // @pointsto makemap@mr1make:11
print(mrv) // @pointsto makeinterface:map[*int]*bool
print(mrv) // @types map[*int]*bool
keys := mrv.MapKeys()
print(keys) // @pointsto <alloc in (reflect.Value).MapKeys>
for _, k := range keys {
print(k) // @pointsto <alloc in (reflect.Value).MapKeys>
print(k) // @types *int
print(k.Interface()) // @types *int
print(k.Interface().(*int)) // @pointsto main.a
v := mrv.MapIndex(k)
print(v.Interface()) // @types *bool
print(v.Interface().(*bool)) // @pointsto main.b
}
}
func reflectSetMapIndex() {
m := make(map[*int]*bool)
mrv := reflect.ValueOf(m)
mrv.SetMapIndex(reflect.ValueOf(&a), reflect.ValueOf(&b))
print(m[nil]) // @pointsto main.b
for _, k := range mrv.MapKeys() {
print(k.Interface()) // @types *int
print(k.Interface().(*int)) // @pointsto main.a
}
tmap := reflect.TypeOf(m)
// types.EvalNode won't let us refer to non-exported types:
// print(tmap) // #@types *reflect.rtype
print(tmap) // @pointsto map[*int]*bool
zmap := reflect.Zero(tmap)
print(zmap) // @pointsto <alloc in reflect.Zero>
print(zmap.Interface()) // @pointsto <alloc in reflect.Zero>
print(tmap.Key()) // @pointsto *int
print(tmap.Elem()) // @pointsto *bool
print(reflect.Zero(tmap.Key())) // @pointsto <alloc in reflect.Zero>
print(reflect.Zero(tmap.Key()).Interface()) // @pointsto <alloc in reflect.Zero>
print(reflect.Zero(tmap.Key()).Interface()) // @types *int
print(reflect.Zero(tmap.Elem())) // @pointsto <alloc in reflect.Zero>
print(reflect.Zero(tmap.Elem()).Interface()) // @pointsto <alloc in reflect.Zero>
print(reflect.Zero(tmap.Elem()).Interface()) // @types *bool
}
func reflectSetMapIndexInterface() {
// Exercises reflect.Value conversions to/from interfaces:
// a different code path than for concrete types.
m := make(map[interface{}]interface{})
reflect.ValueOf(m).SetMapIndex(reflect.ValueOf(&a), reflect.ValueOf(&b))
for k, v := range m {
print(k) // @types *int
print(k.(*int)) // @pointsto main.a
print(v) // @types *bool
print(v.(*bool)) // @pointsto main.b
}
}
func reflectSetMapIndexAssignable() {
// SetMapIndex performs implicit assignability conversions.
type I *int
type J *int
str := reflect.ValueOf("")
// *int is assignable to I.
m1 := make(map[string]I)
reflect.ValueOf(m1).SetMapIndex(str, reflect.ValueOf(new(int))) // @line int
print(m1[""]) // @pointsto new@int:58
// I is assignable to I.
m2 := make(map[string]I)
reflect.ValueOf(m2).SetMapIndex(str, reflect.ValueOf(I(new(int)))) // @line I
print(m2[""]) // @pointsto new@I:60
// J is not assignable to I.
m3 := make(map[string]I)
reflect.ValueOf(m3).SetMapIndex(str, reflect.ValueOf(J(new(int))))
print(m3[""]) // @pointsto
}
func reflectMakeMap() {
t := reflect.TypeOf(map[*int]*bool(nil))
v := reflect.MakeMap(t)
print(v) // @types map[*int]*bool
print(v) // @pointsto <alloc in reflect.MakeMap>
}
func main() {
reflectMapKeysIndex()
reflectSetMapIndex()
reflectSetMapIndexInterface()
reflectSetMapIndexAssignable()
reflectMakeMap()
// TODO(adonovan): reflect.MapOf(Type)
}

74
vendor/golang.org/x/tools/go/pointer/testdata/maps.go generated vendored Normal file
View File

@@ -0,0 +1,74 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@calls comments are expected analyzer output keyed to
// source positions -- do not edit or reformat the code lines.
// Test of maps.
var a, b, c int
func maps1() {
m1 := map[*int]*int{&a: &b} // @line m1m1
m2 := make(map[*int]*int) // @line m1m2
m2[&b] = &a
print(m1[nil]) // @pointsto main.b | main.c
print(m2[nil]) // @pointsto main.a
print(m1) // @pointsto makemap@m1m1:21
print(m2) // @pointsto makemap@m1m2:12
m1[&b] = &c
for k, v := range m1 {
print(k) // @pointsto main.a | main.b
print(v) // @pointsto main.b | main.c
}
for k, v := range m2 {
print(k) // @pointsto main.b
print(v) // @pointsto main.a
}
// Lookup doesn't create any aliases.
print(m2[&c]) // @pointsto main.a
if _, ok := m2[&a]; ok {
print(m2[&c]) // @pointsto main.a
}
}
func maps2() {
m1 := map[*int]*int{&a: &b}
m2 := map[*int]*int{&b: &c}
_ = []map[*int]*int{m1, m2} // (no spurious merging of m1, m2)
print(m1[nil]) // @pointsto main.b
print(m2[nil]) // @pointsto main.c
}
var g int
func maps3() {
// Regression test for a constraint generation bug for map range
// loops in which the key is unused: the (ok, k, v) tuple
// returned by ssa.Next may have type 'invalid' for the k and/or
// v components, so copying the map key or value may cause
// miswiring if the key has >1 components. In the worst case,
// this causes a crash. The test below used to report that
// pts(v) includes not just main.g but new(float64) too, which
// is ill-typed.
// sizeof(K) > 1, abstractly
type K struct{ a, b *float64 }
k := K{new(float64), nil}
m := map[K]*int{k: &g}
for _, v := range m {
print(v) // @pointsto main.g
}
}
func main() {
maps1()
maps2()
maps3()
}

36
vendor/golang.org/x/tools/go/pointer/testdata/panic.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types comments are expected analyzer output -- do not edit
// or reformat the code lines.
// Test of value flow from panic() to recover().
// We model them as stores/loads of a global location.
// We ignore concrete panic types originating from the runtime.
var someval int
type myPanic struct{}
func f(int) {}
func g() string { return "" }
func deadcode() {
panic(123) // not reached
}
func main() {
switch someval {
case 0:
panic("oops")
case 1:
panic(myPanic{})
case 2:
panic(f)
case 3:
panic(g)
}
ex := recover()
print(ex) // @types myPanic | string | func(int) | func() string
print(ex.(func(int))) // @pointsto main.f
print(ex.(func() string)) // @pointsto main.g
}

11
vendor/golang.org/x/tools/go/pointer/testdata/recur.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @calls comment is an expected analyzer result -- do not edit.
// Analysis abstraction of recursive calls is finite.
func main() {
main()
}
// @calls main.main -> main.main

View File

@@ -0,0 +1,115 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@warning comments are expected analyzer output keyed
// to source positions -- do not edit or reformat the code lines.
import "reflect"
import "unsafe"
var a, b int
var unknown bool
func reflectIndirect() {
ptr := &a
// Pointer:
print(reflect.Indirect(reflect.ValueOf(&ptr)).Interface().(*int)) // @pointsto main.a
// Non-pointer:
print(reflect.Indirect(reflect.ValueOf([]*int{ptr})).Interface().([]*int)[0]) // @pointsto main.a
}
func reflectNewAt() {
var x [8]byte
print(reflect.NewAt(reflect.TypeOf(3), unsafe.Pointer(&x)).Interface()) // @types *int
}
// @warning "unsound: main.reflectNewAt contains a reflect.NewAt.. call"
func reflectTypeOf() {
t := reflect.TypeOf(3)
if unknown {
t = reflect.TypeOf("foo")
}
// TODO(adonovan): make types.Eval let us refer to unexported types.
print(t) // #@types *reflect.rtype
print(reflect.Zero(t).Interface()) // @types int | string
newint := reflect.New(t).Interface() // @line rtonew
print(newint) // @types *int | *string
print(newint.(*int)) // @pointsto <alloc in reflect.New>
print(newint.(*string)) // @pointsto <alloc in reflect.New>
}
func reflectTypeElem() {
print(reflect.Zero(reflect.TypeOf(&a).Elem()).Interface()) // @types int
print(reflect.Zero(reflect.TypeOf([]string{}).Elem()).Interface()) // @types string
print(reflect.Zero(reflect.TypeOf(make(chan bool)).Elem()).Interface()) // @types bool
print(reflect.Zero(reflect.TypeOf(make(map[string]float64)).Elem()).Interface()) // @types float64
print(reflect.Zero(reflect.TypeOf([3]complex64{}).Elem()).Interface()) // @types complex64
print(reflect.Zero(reflect.TypeOf(3).Elem()).Interface()) // @types
print(reflect.Zero(reflect.TypeOf(new(interface{})).Elem())) // @types interface{}
print(reflect.Zero(reflect.TypeOf(new(interface{})).Elem()).Interface()) // @types
}
// reflect.Values within reflect.Values.
func metareflection() {
// "box" a *int twice, unbox it twice.
v0 := reflect.ValueOf(&a)
print(v0) // @types *int
v1 := reflect.ValueOf(v0) // box
print(v1) // @types reflect.Value
v2 := reflect.ValueOf(v1) // box
print(v2) // @types reflect.Value
v1a := v2.Interface().(reflect.Value) // unbox
print(v1a) // @types reflect.Value
v0a := v1a.Interface().(reflect.Value) // unbox
print(v0a) // @types *int
print(v0a.Interface().(*int)) // @pointsto main.a
// "box" an interface{} lvalue twice, unbox it twice.
var iface interface{} = 3
x0 := reflect.ValueOf(&iface).Elem()
print(x0) // @types interface{}
x1 := reflect.ValueOf(x0) // box
print(x1) // @types reflect.Value
x2 := reflect.ValueOf(x1) // box
print(x2) // @types reflect.Value
x1a := x2.Interface().(reflect.Value) // unbox
print(x1a) // @types reflect.Value
x0a := x1a.Interface().(reflect.Value) // unbox
print(x0a) // @types interface{}
print(x0a.Interface()) // @types int
}
type T struct{}
// When the output of a type constructor flows to its input, we must
// bound the set of types created to ensure termination of the algorithm.
func typeCycle() {
t := reflect.TypeOf(0)
u := reflect.TypeOf("")
v := reflect.TypeOf(T{})
for unknown {
t = reflect.PtrTo(t)
t = reflect.SliceOf(t)
u = reflect.SliceOf(u)
if unknown {
v = reflect.ChanOf(reflect.BothDir, v)
} else {
v = reflect.PtrTo(v)
}
}
// Type height is bounded to about 4 map/slice/chan/pointer constructors.
print(reflect.Zero(t).Interface()) // @types int | []*int | []*[]*int
print(reflect.Zero(u).Interface()) // @types string | []string | [][]string | [][][]string | [][][][]string
print(reflect.Zero(v).Interface()) // @types T | *T | **T | ***T | ****T | chan T | *chan T | **chan T | chan *T | *chan *T | chan **T | chan ***T | chan chan T | chan *chan T | chan chan *T
}
func main() {
reflectIndirect()
reflectNewAt()
reflectTypeOf()
reflectTypeElem()
metareflection()
typeCycle()
}

29
vendor/golang.org/x/tools/go/pointer/testdata/rtti.go generated vendored Normal file
View File

@@ -0,0 +1,29 @@
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto comment is an expected analyzer result -- do not edit or
// reformat the code lines.
// Regression test for guru crash
// https://code.google.com/p/go/issues/detail?id=6605
//
// Using reflection, methods may be called on types that are not the
// operand of any ssa.MakeInterface instruction. In this example,
// (Y).F is called by deriving the type Y from *Y. Prior to the fix,
// no RTTI (or method set) for type Y was included in the program, so
// the F() call would crash.
import "reflect"
var a int
type X struct{}
func (X) F() *int {
return &a
}
type I interface {
F() *int
}
func main() {
type Y struct{ X }
print(reflect.Indirect(reflect.ValueOf(new(Y))).Interface().(I).F()) // @pointsto main.a
}

View File

@@ -0,0 +1,45 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types comments are expected analyzer output keyed to source
// positions -- do not edit or reformat the code lines.
import "reflect"
type A struct {
f *int
g interface{}
h bool
}
var dyn string
func reflectTypeFieldByName() {
f, _ := reflect.TypeOf(A{}).FieldByName("f")
print(f.Type) // @pointsto *int
g, _ := reflect.TypeOf(A{}).FieldByName("g")
print(g.Type) // @pointsto interface{}
print(reflect.Zero(g.Type)) // @pointsto <alloc in reflect.Zero>
print(reflect.Zero(g.Type)) // @types interface{}
print(reflect.Zero(g.Type).Interface()) // @pointsto
print(reflect.Zero(g.Type).Interface()) // @types
h, _ := reflect.TypeOf(A{}).FieldByName("h")
print(h.Type) // @pointsto bool
missing, _ := reflect.TypeOf(A{}).FieldByName("missing")
print(missing.Type) // @pointsto
dyn, _ := reflect.TypeOf(A{}).FieldByName(dyn)
print(dyn.Type) // @pointsto *int | bool | interface{}
}
func reflectTypeField() {
fld := reflect.TypeOf(A{}).Field(0)
print(fld.Type) // @pointsto *int | bool | interface{}
}
func main() {
reflectTypeFieldByName()
reflectTypeField()
}

View File

@@ -0,0 +1,100 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @pointsto/@types/@calls comments are expected analyzer output keyed to
// source positions -- do not edit or reformat the code lines.
var unknown bool // defeat dead-code elimination
var p, q int
type A struct {
f *int
g interface{}
}
func (a A) m1() {
print(a.f) // @pointsto main.p
}
func (a *A) m2() {
print(a) // @pointsto complit.A@struct1s:9
print(a.f) // @pointsto main.p
}
type B struct {
h *int
A
}
func structs1() {
b := &B{ // @line struct1s
h: &q,
}
b.f = &p
b.g = b
print(b.h) // @pointsto main.q
print(b.f) // @pointsto main.p
print(b.g) // @types *B
ptr := &b.f
print(*ptr) // @pointsto main.p
b.m1()
b.m2()
}
// @calls main.structs1 -> (main.A).m1
// @calls main.structs1 -> (*main.A).m2
// @calls (*main.B).m1 -> (main.A).m1
// @calls (*main.B).m2 -> (*main.A).m2
type T struct {
x int
y int
}
type S struct {
a [3]T
b *[3]T
c [3]*T
}
func structs2() {
var s S // @line s2s
print(&s) // @pointsto s@s2s:6
print(&s.a) // @pointsto s.a@s2s:6
print(&s.a[0]) // @pointsto s.a[*]@s2s:6
print(&s.a[0].x) // @pointsto s.a[*].x@s2s:6
print(&s.a[0].y) // @pointsto s.a[*].y@s2s:6
print(&s.b) // @pointsto s.b@s2s:6
print(&s.b[0]) // @pointsto
print(&s.b[0].x) // @pointsto
print(&s.b[0].y) // @pointsto
print(&s.c) // @pointsto s.c@s2s:6
print(&s.c[0]) // @pointsto s.c[*]@s2s:6
print(&s.c[0].x) // @pointsto
print(&s.c[0].y) // @pointsto
var s2 S // @line s2s2
s2.b = new([3]T) // @line s2s2b
print(s2.b) // @pointsto new@s2s2b:12
print(&s2.b) // @pointsto s2.b@s2s2:6
print(&s2.b[0]) // @pointsto new[*]@s2s2b:12
print(&s2.b[0].x) // @pointsto new[*].x@s2s2b:12
print(&s2.b[0].y) // @pointsto new[*].y@s2s2b:12
print(&s2.c[0].x) // @pointsto
print(&s2.c[0].y) // @pointsto
var s3 S // @line s2s3
s3.c[2] = new(T) // @line s2s3c
print(&s3.c) // @pointsto s3.c@s2s3:6
print(s3.c[1]) // @pointsto new@s2s3c:15
print(&s3.c[1]) // @pointsto s3.c[*]@s2s3:6
print(&s3.c[1].x) // @pointsto new.x@s2s3c:15
print(&s3.c[1].y) // @pointsto new.y@s2s3c:15
}
func main() {
structs1()
structs2()
}

24
vendor/golang.org/x/tools/go/pointer/testdata/timer.go generated vendored Normal file
View File

@@ -0,0 +1,24 @@
// +build ignore
package main
// NOTE(review): vendored, generated pointer-analysis test fixture; the
// @calls comments are expected analyzer results -- do not edit or
// reformat the code lines.
import "time"
func after() {}
func main() {
// @calls time.startTimer -> time.sendTime
ticker := time.NewTicker(1)
<-ticker.C
// @calls time.startTimer -> time.sendTime
timer := time.NewTimer(time.Second)
<-timer.C
// @calls time.startTimer -> time.goFunc
// @calls time.goFunc -> main.after
timer = time.AfterFunc(time.Second, after)
<-timer.C
}
// @calls time.sendTime -> time.Now

313
vendor/golang.org/x/tools/go/pointer/util.go generated vendored Normal file
View File

@@ -0,0 +1,313 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pointer
import (
"bytes"
"fmt"
"go/types"
"log"
"os"
"os/exec"
"runtime"
"time"
"golang.org/x/tools/container/intsets"
)
// CanPoint reports whether the type T is pointerlike,
// for the purposes of this analysis.
func CanPoint(T types.Type) bool {
switch T := T.(type) {
case *types.Named:
if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
return true // treat reflect.Value like interface{}
}
return CanPoint(T.Underlying())
case *types.Pointer, *types.Interface, *types.Map, *types.Chan, *types.Signature, *types.Slice:
return true
}
return false // array struct tuple builtin basic
}
// CanHaveDynamicTypes reports whether the type T can "hold" dynamic types,
// i.e. is an interface (incl. reflect.Type) or a reflect.Value.
//
func CanHaveDynamicTypes(T types.Type) bool {
switch T := T.(type) {
case *types.Named:
if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
return true // reflect.Value
}
return CanHaveDynamicTypes(T.Underlying())
case *types.Interface:
return true
}
return false
}
// isInterface reports whether T is an interface type (thin wrapper
// around types.IsInterface for local brevity).
func isInterface(T types.Type) bool { return types.IsInterface(T) }
// mustDeref returns the element type of its argument, which must be a
// pointer; panic ensues otherwise.
func mustDeref(typ types.Type) types.Type {
return typ.Underlying().(*types.Pointer).Elem()
}
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
if p, ok := typ.Underlying().(*types.Pointer); ok {
return p.Elem()
}
return typ
}
// A fieldInfo describes one subelement (node) of the flattening-out
// of a type T: the subelement's type and its path from the root of T.
//
// For example, for this type:
// type line struct{ points []struct{x, y int} }
// flatten() of the inner struct yields the following []fieldInfo:
// struct{ x, y int } ""
// int ".x"
// int ".y"
// and flatten(line) yields:
// struct{ points []struct{x, y int} } ""
// struct{ x, y int } ".points[*]"
// int ".points[*].x"
// int ".points[*].y"
//
type fieldInfo struct {
typ types.Type
// op and tail describe the path to the element (e.g. ".a#2.b[*].c").
op interface{} // *Array: true; *Tuple: int; *Struct: *types.Var; *Named: nil
tail *fieldInfo
}
// path returns a user-friendly string describing the subelement path,
// e.g. ".points[*].x" (see fieldInfo for the op encoding).
func (fi *fieldInfo) path() string {
	var buf bytes.Buffer
	for node := fi; node != nil; node = node.tail {
		switch op := node.op.(type) {
		case bool: // array element
			buf.WriteString("[*]")
		case int: // tuple component
			fmt.Fprintf(&buf, "#%d", op)
		case *types.Var: // struct field
			buf.WriteByte('.')
			buf.WriteString(op.Name())
		}
	}
	return buf.String()
}
// flatten returns a list of directly contained fields in the preorder
// traversal of the type tree of t. The resulting elements are all
// scalars (basic types or pointerlike types), except for struct/array
// "identity" nodes, whose type is that of the aggregate.
//
// reflect.Value is considered pointerlike, similar to interface{}.
//
// Callers must not mutate the result.
//
func (a *analysis) flatten(t types.Type) []*fieldInfo {
// Memoized: each distinct type is flattened at most once.
fl, ok := a.flattenMemo[t]
if !ok {
switch t := t.(type) {
case *types.Named:
u := t.Underlying()
if isInterface(u) {
// Debuggability hack: don't remove
// the named type from interfaces as
// they're very verbose.
fl = append(fl, &fieldInfo{typ: t})
} else {
fl = a.flatten(u)
}
case *types.Basic,
*types.Signature,
*types.Chan,
*types.Map,
*types.Interface,
*types.Slice,
*types.Pointer:
// Scalar or pointerlike: a single leaf node.
fl = append(fl, &fieldInfo{typ: t})
case *types.Array:
fl = append(fl, &fieldInfo{typ: t}) // identity node
for _, fi := range a.flatten(t.Elem()) {
fl = append(fl, &fieldInfo{typ: fi.typ, op: true, tail: fi})
}
case *types.Struct:
fl = append(fl, &fieldInfo{typ: t}) // identity node
for i, n := 0, t.NumFields(); i < n; i++ {
f := t.Field(i)
for _, fi := range a.flatten(f.Type()) {
fl = append(fl, &fieldInfo{typ: fi.typ, op: f, tail: fi})
}
}
case *types.Tuple:
// No identity node: tuples are never address-taken.
n := t.Len()
if n == 1 {
// Don't add a fieldInfo link for singletons,
// e.g. in params/results.
fl = append(fl, a.flatten(t.At(0).Type())...)
} else {
for i := 0; i < n; i++ {
f := t.At(i)
for _, fi := range a.flatten(f.Type()) {
fl = append(fl, &fieldInfo{typ: fi.typ, op: i, tail: fi})
}
}
}
default:
panic(fmt.Sprintf("cannot flatten unsupported type %T", t))
}
// Cache the result; it is shared, hence "callers must not mutate".
a.flattenMemo[t] = fl
}
return fl
}
// sizeof returns the number of pointerlike abstractions (nodes) in the
// type t, i.e. the length of its flattening.
func (a *analysis) sizeof(t types.Type) uint32 {
return uint32(len(a.flatten(t)))
}
// shouldTrack reports whether object type T contains (recursively)
// any fields whose addresses should be tracked.
func (a *analysis) shouldTrack(T types.Type) bool {
if a.track == trackAll {
return true // fast path
}
track, ok := a.trackTypes[T]
if !ok {
// Provisionally record T as tracked so that recursive types
// terminate; the computed answer overwrites this below.
a.trackTypes[T] = true // break cycles conservatively
// NB: reflect.Value, reflect.Type are pre-populated to true.
for _, fi := range a.flatten(T) {
switch ft := fi.typ.Underlying().(type) {
case *types.Interface, *types.Signature:
track = true // needed for callgraph
case *types.Basic:
// no-op
case *types.Chan:
track = a.track&trackChan != 0 || a.shouldTrack(ft.Elem())
case *types.Map:
track = a.track&trackMap != 0 || a.shouldTrack(ft.Key()) || a.shouldTrack(ft.Elem())
case *types.Slice:
track = a.track&trackSlice != 0 || a.shouldTrack(ft.Elem())
case *types.Pointer:
track = a.track&trackPtr != 0 || a.shouldTrack(ft.Elem())
case *types.Array, *types.Struct:
// No need to look at field types since they will follow (flattened).
default:
// Includes *types.Tuple, which are never address-taken.
panic(ft)
}
if track {
break
}
}
a.trackTypes[T] = track
if !track && a.log != nil {
fmt.Fprintf(a.log, "\ttype not tracked: %s\n", T)
}
}
return track
}
// offsetOf returns the (abstract) offset of field index within struct
// or tuple typ, measured in flattened nodes.
func (a *analysis) offsetOf(typ types.Type, index int) uint32 {
	switch t := typ.Underlying().(type) {
	case *types.Tuple:
		var off uint32
		for i := 0; i < index; i++ {
			off += a.sizeof(t.At(i).Type())
		}
		return off
	case *types.Struct:
		off := uint32(1) // skip past the struct's identity node
		for i := 0; i < index; i++ {
			off += a.sizeof(t.Field(i).Type())
		}
		return off
	default:
		panic(fmt.Sprintf("offsetOf(%s : %T)", typ, typ))
	}
}
// sliceToArray returns the type representing the arrays to which
// slice type slice points.
func sliceToArray(slice types.Type) *types.Array {
return types.NewArray(slice.Underlying().(*types.Slice).Elem(), 1)
}
// Node set -------------------------------------------------------------------
// A nodeset is a set of node ids, represented as a sparse bit set.
type nodeset struct {
intsets.Sparse
}
// String returns the set in "{n1, n2, ...}" notation.
func (ns *nodeset) String() string {
	var buf bytes.Buffer
	buf.WriteByte('{')
	var scratch [50]int // stack space for small sets
	for i, n := range ns.AppendTo(scratch[:0]) {
		if i > 0 {
			buf.WriteString(", ")
		}
		fmt.Fprintf(&buf, "n%d", n)
	}
	buf.WriteByte('}')
	return buf.String()
}
// add inserts n into the set, reporting whether it was newly added.
func (ns *nodeset) add(n nodeid) bool {
return ns.Sparse.Insert(int(n))
}
// addAll unions y into x, reporting whether x changed.
func (x *nodeset) addAll(y *nodeset) bool {
return x.UnionWith(&y.Sparse)
}
// Profiling & debugging -------------------------------------------------------
// timers records the start time of each named phase (see start/stop).
var timers = make(map[string]time.Time)
// start begins timing the named phase (no-op unless debugTimers is set).
func start(name string) {
if debugTimers {
timers[name] = time.Now()
log.Printf("%s...\n", name)
}
}
// stop logs the elapsed time of the named phase (no-op unless debugTimers is set).
func stop(name string) {
if debugTimers {
log.Printf("%s took %s\n", name, time.Since(timers[name]))
}
}
// diff runs the system diff command on files a and b, writing its output
// to stderr, and reports whether the command succeeded (i.e. the files
// were identical).
func diff(a, b string) bool {
	tool, flag := "/usr/bin/diff", "-u"
	if runtime.GOOS == "plan9" {
		tool, flag = "/bin/diff", "-c"
	}
	cmd := exec.Command(tool, flag, a, b)
	cmd.Stdout = os.Stderr
	cmd.Stderr = os.Stderr
	return cmd.Run() == nil
}