Bumping k8s dependencies to 1.13
@@ -1,38 +1,16 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto

/*
Package cluster is a generated protocol buffer package.

It is generated from these files:
google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto
google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto
google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto

It has these top-level messages:
Zone
Cluster
ListZonesRequest
ListZonesResponse
GetClusterRequest
ListClustersRequest
ListClustersResponse
CreateClusterRequest
CreateClusterMetadata
UpdateClusterMetadata
DeleteClusterRequest
UndeleteClusterRequest
UndeleteClusterMetadata
V2OperationMetadata
*/
package cluster

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_longrunning "google.golang.org/genproto/googleapis/longrunning"
import _ "github.com/golang/protobuf/ptypes/timestamp"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
_ "github.com/golang/protobuf/ptypes/timestamp"
_ "google.golang.org/genproto/googleapis/api/annotations"
longrunning "google.golang.org/genproto/googleapis/longrunning"
math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -62,6 +40,7 @@ var StorageType_name = map[int32]string{
1: "STORAGE_SSD",
2: "STORAGE_HDD",
}

var StorageType_value = map[string]int32{
"STORAGE_UNSPECIFIED": 0,
"STORAGE_SSD": 1,
@@ -71,7 +50,10 @@ var StorageType_value = map[string]int32{
func (x StorageType) String() string {
return proto.EnumName(StorageType_name, int32(x))
}
func (StorageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

func (StorageType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_33bf61dbf3bd7369, []int{0}
}

// Possible states of a zone.
type Zone_Status int32
@@ -93,9 +75,10 @@ var Zone_Status_name = map[int32]string{
2: "PLANNED_MAINTENANCE",
3: "EMERGENCY_MAINENANCE",
}

var Zone_Status_value = map[string]int32{
"UNKNOWN": 0,
"OK": 1,
"UNKNOWN": 0,
"OK": 1,
"PLANNED_MAINTENANCE": 2,
"EMERGENCY_MAINENANCE": 3,
}
@@ -103,24 +86,50 @@ var Zone_Status_value = map[string]int32{
func (x Zone_Status) String() string {
return proto.EnumName(Zone_Status_name, int32(x))
}
func (Zone_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }

func (Zone_Status) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_33bf61dbf3bd7369, []int{0, 0}
}

// A physical location in which a particular project can allocate Cloud BigTable
// resources.
type Zone struct {
// A permanent unique identifier for the zone.
// Values are of the form projects/<project>/zones/[a-z][-a-z0-9]*
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The name of this zone as it appears in UIs.
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
// The current state of this zone.
Status Zone_Status `protobuf:"varint,3,opt,name=status,enum=google.bigtable.admin.cluster.v1.Zone_Status" json:"status,omitempty"`
Status Zone_Status `protobuf:"varint,3,opt,name=status,proto3,enum=google.bigtable.admin.cluster.v1.Zone_Status" json:"status,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Zone) Reset() { *m = Zone{} }
func (m *Zone) String() string { return proto.CompactTextString(m) }
func (*Zone) ProtoMessage() {}
func (*Zone) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Zone) Reset() { *m = Zone{} }
func (m *Zone) String() string { return proto.CompactTextString(m) }
func (*Zone) ProtoMessage() {}
func (*Zone) Descriptor() ([]byte, []int) {
return fileDescriptor_33bf61dbf3bd7369, []int{0}
}

func (m *Zone) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Zone.Unmarshal(m, b)
}
func (m *Zone) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Zone.Marshal(b, m, deterministic)
}
func (m *Zone) XXX_Merge(src proto.Message) {
xxx_messageInfo_Zone.Merge(m, src)
}
func (m *Zone) XXX_Size() int {
return xxx_messageInfo_Zone.Size(m)
}
func (m *Zone) XXX_DiscardUnknown() {
xxx_messageInfo_Zone.DiscardUnknown(m)
}

var xxx_messageInfo_Zone proto.InternalMessageInfo

func (m *Zone) GetName() string {
if m != nil {
@@ -149,26 +158,49 @@ type Cluster struct {
// zone in which the cluster resides is included here.
// Values are of the form
// projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]*
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The operation currently running on the cluster, if any.
// This cannot be set directly, only through CreateCluster, UpdateCluster,
// or UndeleteCluster. Calls to these methods will be rejected if
// "current_operation" is already set.
CurrentOperation *google_longrunning.Operation `protobuf:"bytes,3,opt,name=current_operation,json=currentOperation" json:"current_operation,omitempty"`
CurrentOperation *longrunning.Operation `protobuf:"bytes,3,opt,name=current_operation,json=currentOperation,proto3" json:"current_operation,omitempty"`
// The descriptive name for this cluster as it appears in UIs.
// Must be unique per zone.
DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
DisplayName string `protobuf:"bytes,4,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
// The number of serve nodes allocated to this cluster.
ServeNodes int32 `protobuf:"varint,5,opt,name=serve_nodes,json=serveNodes" json:"serve_nodes,omitempty"`
ServeNodes int32 `protobuf:"varint,5,opt,name=serve_nodes,json=serveNodes,proto3" json:"serve_nodes,omitempty"`
// What storage type to use for tables in this cluster. Only configurable at
// cluster creation time. If unspecified, STORAGE_SSD will be used.
DefaultStorageType StorageType `protobuf:"varint,8,opt,name=default_storage_type,json=defaultStorageType,enum=google.bigtable.admin.cluster.v1.StorageType" json:"default_storage_type,omitempty"`
DefaultStorageType StorageType `protobuf:"varint,8,opt,name=default_storage_type,json=defaultStorageType,proto3,enum=google.bigtable.admin.cluster.v1.StorageType" json:"default_storage_type,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Cluster) Reset() { *m = Cluster{} }
func (m *Cluster) String() string { return proto.CompactTextString(m) }
func (*Cluster) ProtoMessage() {}
func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Cluster) Reset() { *m = Cluster{} }
func (m *Cluster) String() string { return proto.CompactTextString(m) }
func (*Cluster) ProtoMessage() {}
func (*Cluster) Descriptor() ([]byte, []int) {
return fileDescriptor_33bf61dbf3bd7369, []int{1}
}

func (m *Cluster) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Cluster.Unmarshal(m, b)
}
func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Cluster.Marshal(b, m, deterministic)
}
func (m *Cluster) XXX_Merge(src proto.Message) {
xxx_messageInfo_Cluster.Merge(m, src)
}
func (m *Cluster) XXX_Size() int {
return xxx_messageInfo_Cluster.Size(m)
}
func (m *Cluster) XXX_DiscardUnknown() {
xxx_messageInfo_Cluster.DiscardUnknown(m)
}

var xxx_messageInfo_Cluster proto.InternalMessageInfo

func (m *Cluster) GetName() string {
if m != nil {
@@ -177,7 +209,7 @@ func (m *Cluster) GetName() string {
return ""
}

func (m *Cluster) GetCurrentOperation() *google_longrunning.Operation {
func (m *Cluster) GetCurrentOperation() *longrunning.Operation {
if m != nil {
return m.CurrentOperation
}
@@ -206,17 +238,17 @@ func (m *Cluster) GetDefaultStorageType() StorageType {
}

func init() {
proto.RegisterType((*Zone)(nil), "google.bigtable.admin.cluster.v1.Zone")
proto.RegisterType((*Cluster)(nil), "google.bigtable.admin.cluster.v1.Cluster")
proto.RegisterEnum("google.bigtable.admin.cluster.v1.StorageType", StorageType_name, StorageType_value)
proto.RegisterEnum("google.bigtable.admin.cluster.v1.Zone_Status", Zone_Status_name, Zone_Status_value)
proto.RegisterType((*Zone)(nil), "google.bigtable.admin.cluster.v1.Zone")
proto.RegisterType((*Cluster)(nil), "google.bigtable.admin.cluster.v1.Cluster")
}

func init() {
proto.RegisterFile("google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto", fileDescriptor0)
proto.RegisterFile("google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto", fileDescriptor_33bf61dbf3bd7369)
}

var fileDescriptor0 = []byte{
var fileDescriptor_33bf61dbf3bd7369 = []byte{
// 493 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xd1, 0x6e, 0xd3, 0x3c,
0x1c, 0xc5, 0x97, 0xae, 0xeb, 0xbe, 0xcf, 0x41, 0x10, 0xcc, 0x24, 0xa2, 0x09, 0xb4, 0x52, 0xb8,

@@ -3,16 +3,15 @@

package cluster

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_longrunning "google.golang.org/genproto/googleapis/longrunning"
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"

import (
context "golang.org/x/net/context"
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
empty "github.com/golang/protobuf/ptypes/empty"
_ "google.golang.org/genproto/googleapis/api/annotations"
longrunning "google.golang.org/genproto/googleapis/longrunning"
grpc "google.golang.org/grpc"
math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
@@ -20,6 +19,53 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

func init() {
proto.RegisterFile("google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto", fileDescriptor_597cf005382fe64c)
}

var fileDescriptor_597cf005382fe64c = []byte{
// 515 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6b, 0x14, 0x31,
0x18, 0xc6, 0x89, 0x07, 0xa1, 0xc1, 0x45, 0xc8, 0xa1, 0x87, 0x6d, 0x0b, 0x32, 0x15, 0xb1, 0x23,
0x26, 0x6e, 0x17, 0xc5, 0xbf, 0x08, 0x5b, 0xa5, 0x1e, 0x04, 0x8b, 0xd2, 0x4b, 0x2f, 0x4b, 0x76,
0xe7, 0x35, 0x8c, 0xcc, 0x24, 0x31, 0xc9, 0x2c, 0xa8, 0xf4, 0xe2, 0xcd, 0x93, 0x88, 0x27, 0x3d,
0x78, 0xeb, 0xdd, 0xef, 0xe2, 0x57, 0xf0, 0x83, 0xc8, 0x64, 0x92, 0xb5, 0x2b, 0x6b, 0x77, 0xa6,
0xb7, 0x99, 0xc9, 0xfb, 0xbc, 0xcf, 0x6f, 0x9e, 0x24, 0x2f, 0x7e, 0x2c, 0x94, 0x12, 0x05, 0xb0,
0x49, 0x2e, 0x1c, 0x9f, 0x14, 0xc0, 0x78, 0x56, 0xe6, 0x92, 0x4d, 0x8b, 0xca, 0x3a, 0x30, 0x6c,
0x36, 0x98, 0xaf, 0x8c, 0xc3, 0xb7, 0xb1, 0x05, 0x33, 0xcb, 0xa7, 0x40, 0xb5, 0x51, 0x4e, 0x91,
0x2b, 0x4d, 0x03, 0x1a, 0xcb, 0xa8, 0x6f, 0x40, 0x43, 0x31, 0x9d, 0x0d, 0xfa, 0x9b, 0xc1, 0x82,
0xeb, 0x9c, 0x71, 0x29, 0x95, 0xe3, 0x2e, 0x57, 0xd2, 0x36, 0xfa, 0xfe, 0xc3, 0xee, 0x00, 0x19,
0x77, 0x3c, 0xa8, 0x9f, 0x9d, 0x1b, 0x7f, 0x5c, 0x82, 0xb5, 0x5c, 0x40, 0xe4, 0xd8, 0x0e, 0x9d,
0x0a, 0x25, 0x85, 0xa9, 0xa4, 0xcc, 0xa5, 0x60, 0x4a, 0x83, 0x59, 0x80, 0xdd, 0x08, 0x45, 0xfe,
0x6d, 0x52, 0xbd, 0x66, 0x50, 0x6a, 0xf7, 0xae, 0x59, 0xdc, 0xfd, 0xb4, 0x86, 0xd7, 0x47, 0xc1,
0x6d, 0xaf, 0x31, 0x7b, 0xd5, 0x78, 0x91, 0x6f, 0x08, 0xaf, 0x3d, 0xcf, 0xad, 0x3b, 0x52, 0x12,
0x2c, 0xd9, 0xa5, 0xab, 0x32, 0xa3, 0xf3, 0xe2, 0x97, 0xf0, 0xb6, 0x02, 0xeb, 0xfa, 0xc3, 0x4e,
0x1a, 0xab, 0x95, 0xb4, 0x90, 0x6c, 0x7f, 0xfc, 0xf5, 0xfb, 0xeb, 0x85, 0x2d, 0xb2, 0x51, 0x07,
0xf1, 0x41, 0xf2, 0x12, 0x1e, 0x69, 0xa3, 0xde, 0xc0, 0xd4, 0x59, 0x96, 0x1e, 0xb3, 0xf7, 0x9e,
0xe6, 0x07, 0xc2, 0x78, 0x1f, 0x5c, 0x20, 0x26, 0x2d, 0x8c, 0xfe, 0x56, 0x47, 0xba, 0x9d, 0xd5,
0xa2, 0xa0, 0x48, 0x6e, 0x79, 0xa6, 0x94, 0x5c, 0x5f, 0xc6, 0xd4, 0x20, 0xb1, 0x34, 0x6e, 0x60,
0x8d, 0x49, 0x7e, 0x22, 0x7c, 0xa9, 0xfe, 0xb7, 0xd0, 0xc1, 0x92, 0xdb, 0xed, 0xb2, 0x88, 0xf5,
0x11, 0xf2, 0x4e, 0x57, 0x59, 0x48, 0x71, 0xe0, 0x89, 0x6f, 0x90, 0x9d, 0xe5, 0x29, 0x72, 0x21,
0x0c, 0x08, 0xee, 0x20, 0x9b, 0x53, 0x93, 0x13, 0x84, 0x7b, 0x7b, 0x06, 0xb8, 0x8b, 0x07, 0x81,
0xb4, 0x30, 0x5f, 0x10, 0x9c, 0x23, 0xd9, 0xc0, 0x99, 0x5c, 0x3b, 0x2b, 0xd9, 0xe3, 0x39, 0xe4,
0x7d, 0x94, 0x92, 0xef, 0x08, 0xf7, 0x0e, 0x75, 0x76, 0x8a, 0xb3, 0xbd, 0x5f, 0x17, 0xb4, 0xa1,
0x47, 0xbb, 0xd9, 0x6f, 0xbd, 0xe9, 0x35, 0xdc, 0x17, 0x84, 0x7b, 0x4f, 0xa0, 0x80, 0x4e, 0x21,
0x2e, 0x08, 0x62, 0x88, 0xeb, 0x51, 0x17, 0xef, 0x2d, 0x7d, 0x5a, 0xdf, 0xdb, 0x78, 0x16, 0xd3,
0xf6, 0x67, 0xf1, 0x04, 0xe1, 0xcb, 0x87, 0x32, 0x5b, 0xa0, 0xba, 0xbb, 0x9a, 0xea, 0x1f, 0x49,
0xe4, 0xda, 0x8a, 0xca, 0x53, 0x43, 0x87, 0xbe, 0x88, 0x43, 0x27, 0xb9, 0xe7, 0xf1, 0x86, 0xc9,
0xa0, 0x75, 0x6a, 0x55, 0xf0, 0x19, 0x7d, 0x46, 0xf8, 0xea, 0x54, 0x95, 0x2b, 0xc9, 0x46, 0x9b,
0xcb, 0x27, 0x96, 0x3d, 0xa8, 0x93, 0x3a, 0x40, 0x47, 0xfb, 0xa1, 0x83, 0x50, 0x05, 0x97, 0x82,
0x2a, 0x23, 0x98, 0x00, 0xe9, 0x73, 0x64, 0xcd, 0x12, 0xd7, 0xb9, 0xfd, 0xff, 0xfc, 0x7d, 0x10,
0x1e, 0x27, 0x17, 0xbd, 0x66, 0xf8, 0x27, 0x00, 0x00, 0xff, 0xff, 0x50, 0x92, 0x91, 0x86, 0x71,
0x06, 0x00, 0x00,
}

// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
@@ -28,8 +74,9 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// Client API for BigtableClusterService service

// BigtableClusterServiceClient is the client API for BigtableClusterService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BigtableClusterServiceClient interface {
// Lists the supported zones for the given project.
ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error)
@@ -93,7 +140,7 @@ type BigtableClusterServiceClient interface {
// At the cluster's "delete_time":
// * The cluster and *all of its tables* will immediately and irrevocably
// disappear from the API, and their data will be permanently deleted.
DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*empty.Empty, error)
// Cancels the scheduled deletion of an cluster and begins preparing it to
// resume serving. The returned operation will also be embedded as the
// cluster's "current_operation".
@@ -107,7 +154,7 @@ type BigtableClusterServiceClient interface {
// * All tables within the cluster will be available.
// [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is
// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
UndeleteCluster(ctx context.Context, in *UndeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error)
UndeleteCluster(ctx context.Context, in *UndeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}

type bigtableClusterServiceClient struct {
@@ -120,7 +167,7 @@ func NewBigtableClusterServiceClient(cc *grpc.ClientConn) BigtableClusterService

func (c *bigtableClusterServiceClient) ListZones(ctx context.Context, in *ListZonesRequest, opts ...grpc.CallOption) (*ListZonesResponse, error) {
out := new(ListZonesResponse)
err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListZones", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListZones", in, out, opts...)
if err != nil {
return nil, err
}
@@ -129,7 +176,7 @@ func (c *bigtableClusterServiceClient) ListZones(ctx context.Context, in *ListZo

func (c *bigtableClusterServiceClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) {
out := new(Cluster)
err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/GetCluster", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/GetCluster", in, out, opts...)
if err != nil {
return nil, err
}
@@ -138,7 +185,7 @@ func (c *bigtableClusterServiceClient) GetCluster(ctx context.Context, in *GetCl

func (c *bigtableClusterServiceClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) {
out := new(ListClustersResponse)
err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListClusters", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/ListClusters", in, out, opts...)
if err != nil {
return nil, err
}
@@ -147,7 +194,7 @@ func (c *bigtableClusterServiceClient) ListClusters(ctx context.Context, in *Lis

func (c *bigtableClusterServiceClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*Cluster, error) {
out := new(Cluster)
err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/CreateCluster", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/CreateCluster", in, out, opts...)
if err != nil {
return nil, err
}
@@ -156,33 +203,32 @@ func (c *bigtableClusterServiceClient) CreateCluster(ctx context.Context, in *Cr

func (c *bigtableClusterServiceClient) UpdateCluster(ctx context.Context, in *Cluster, opts ...grpc.CallOption) (*Cluster, error) {
out := new(Cluster)
err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/UpdateCluster", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/UpdateCluster", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *bigtableClusterServiceClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
out := new(google_protobuf2.Empty)
err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/DeleteCluster", in, out, c.cc, opts...)
func (c *bigtableClusterServiceClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/DeleteCluster", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

func (c *bigtableClusterServiceClient) UndeleteCluster(ctx context.Context, in *UndeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) {
out := new(google_longrunning.Operation)
err := grpc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/UndeleteCluster", in, out, c.cc, opts...)
func (c *bigtableClusterServiceClient) UndeleteCluster(ctx context.Context, in *UndeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
out := new(longrunning.Operation)
err := c.cc.Invoke(ctx, "/google.bigtable.admin.cluster.v1.BigtableClusterService/UndeleteCluster", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}

// Server API for BigtableClusterService service

// BigtableClusterServiceServer is the server API for BigtableClusterService service.
type BigtableClusterServiceServer interface {
// Lists the supported zones for the given project.
ListZones(context.Context, *ListZonesRequest) (*ListZonesResponse, error)
@@ -246,7 +292,7 @@ type BigtableClusterServiceServer interface {
// At the cluster's "delete_time":
// * The cluster and *all of its tables* will immediately and irrevocably
// disappear from the API, and their data will be permanently deleted.
DeleteCluster(context.Context, *DeleteClusterRequest) (*google_protobuf2.Empty, error)
DeleteCluster(context.Context, *DeleteClusterRequest) (*empty.Empty, error)
// Cancels the scheduled deletion of an cluster and begins preparing it to
// resume serving. The returned operation will also be embedded as the
// cluster's "current_operation".
@@ -260,7 +306,7 @@ type BigtableClusterServiceServer interface {
// * All tables within the cluster will be available.
// [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is
// [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
UndeleteCluster(context.Context, *UndeleteClusterRequest) (*google_longrunning.Operation, error)
UndeleteCluster(context.Context, *UndeleteClusterRequest) (*longrunning.Operation, error)
}

func RegisterBigtableClusterServiceServer(s *grpc.Server, srv BigtableClusterServiceServer) {
@@ -429,44 +475,3 @@ var _BigtableClusterService_serviceDesc = grpc.ServiceDesc{
Streams: []grpc.StreamDesc{},
Metadata: "google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto",
}

func init() {
proto.RegisterFile("google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto", fileDescriptor1)
}

var fileDescriptor1 = []byte{
// 515 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6b, 0x14, 0x31,
0x18, 0xc6, 0x89, 0x07, 0xa1, 0xc1, 0x45, 0xc8, 0xa1, 0x87, 0x6d, 0x0b, 0x32, 0x15, 0xb1, 0x23,
0x26, 0x6e, 0x17, 0xc5, 0xbf, 0x08, 0x5b, 0xa5, 0x1e, 0x04, 0x8b, 0xd2, 0x4b, 0x2f, 0x4b, 0x76,
0xe7, 0x35, 0x8c, 0xcc, 0x24, 0x31, 0xc9, 0x2c, 0xa8, 0xf4, 0xe2, 0xcd, 0x93, 0x88, 0x27, 0x3d,
0x78, 0xeb, 0xdd, 0xef, 0xe2, 0x57, 0xf0, 0x83, 0xc8, 0x64, 0x92, 0xb5, 0x2b, 0x6b, 0x77, 0xa6,
0xb7, 0x99, 0xc9, 0xfb, 0xbc, 0xcf, 0x6f, 0x9e, 0x24, 0x2f, 0x7e, 0x2c, 0x94, 0x12, 0x05, 0xb0,
0x49, 0x2e, 0x1c, 0x9f, 0x14, 0xc0, 0x78, 0x56, 0xe6, 0x92, 0x4d, 0x8b, 0xca, 0x3a, 0x30, 0x6c,
0x36, 0x98, 0xaf, 0x8c, 0xc3, 0xb7, 0xb1, 0x05, 0x33, 0xcb, 0xa7, 0x40, 0xb5, 0x51, 0x4e, 0x91,
0x2b, 0x4d, 0x03, 0x1a, 0xcb, 0xa8, 0x6f, 0x40, 0x43, 0x31, 0x9d, 0x0d, 0xfa, 0x9b, 0xc1, 0x82,
0xeb, 0x9c, 0x71, 0x29, 0x95, 0xe3, 0x2e, 0x57, 0xd2, 0x36, 0xfa, 0xfe, 0xc3, 0xee, 0x00, 0x19,
0x77, 0x3c, 0xa8, 0x9f, 0x9d, 0x1b, 0x7f, 0x5c, 0x82, 0xb5, 0x5c, 0x40, 0xe4, 0xd8, 0x0e, 0x9d,
0x0a, 0x25, 0x85, 0xa9, 0xa4, 0xcc, 0xa5, 0x60, 0x4a, 0x83, 0x59, 0x80, 0xdd, 0x08, 0x45, 0xfe,
0x6d, 0x52, 0xbd, 0x66, 0x50, 0x6a, 0xf7, 0xae, 0x59, 0xdc, 0xfd, 0xb4, 0x86, 0xd7, 0x47, 0xc1,
0x6d, 0xaf, 0x31, 0x7b, 0xd5, 0x78, 0x91, 0x6f, 0x08, 0xaf, 0x3d, 0xcf, 0xad, 0x3b, 0x52, 0x12,
0x2c, 0xd9, 0xa5, 0xab, 0x32, 0xa3, 0xf3, 0xe2, 0x97, 0xf0, 0xb6, 0x02, 0xeb, 0xfa, 0xc3, 0x4e,
0x1a, 0xab, 0x95, 0xb4, 0x90, 0x6c, 0x7f, 0xfc, 0xf5, 0xfb, 0xeb, 0x85, 0x2d, 0xb2, 0x51, 0x07,
0xf1, 0x41, 0xf2, 0x12, 0x1e, 0x69, 0xa3, 0xde, 0xc0, 0xd4, 0x59, 0x96, 0x1e, 0xb3, 0xf7, 0x9e,
0xe6, 0x07, 0xc2, 0x78, 0x1f, 0x5c, 0x20, 0x26, 0x2d, 0x8c, 0xfe, 0x56, 0x47, 0xba, 0x9d, 0xd5,
0xa2, 0xa0, 0x48, 0x6e, 0x79, 0xa6, 0x94, 0x5c, 0x5f, 0xc6, 0xd4, 0x20, 0xb1, 0x34, 0x6e, 0x60,
0x8d, 0x49, 0x7e, 0x22, 0x7c, 0xa9, 0xfe, 0xb7, 0xd0, 0xc1, 0x92, 0xdb, 0xed, 0xb2, 0x88, 0xf5,
0x11, 0xf2, 0x4e, 0x57, 0x59, 0x48, 0x71, 0xe0, 0x89, 0x6f, 0x90, 0x9d, 0xe5, 0x29, 0x72, 0x21,
0x0c, 0x08, 0xee, 0x20, 0x9b, 0x53, 0x93, 0x13, 0x84, 0x7b, 0x7b, 0x06, 0xb8, 0x8b, 0x07, 0x81,
0xb4, 0x30, 0x5f, 0x10, 0x9c, 0x23, 0xd9, 0xc0, 0x99, 0x5c, 0x3b, 0x2b, 0xd9, 0xe3, 0x39, 0xe4,
0x7d, 0x94, 0x92, 0xef, 0x08, 0xf7, 0x0e, 0x75, 0x76, 0x8a, 0xb3, 0xbd, 0x5f, 0x17, 0xb4, 0xa1,
0x47, 0xbb, 0xd9, 0x6f, 0xbd, 0xe9, 0x35, 0xdc, 0x17, 0x84, 0x7b, 0x4f, 0xa0, 0x80, 0x4e, 0x21,
0x2e, 0x08, 0x62, 0x88, 0xeb, 0x51, 0x17, 0xef, 0x2d, 0x7d, 0x5a, 0xdf, 0xdb, 0x78, 0x16, 0xd3,
0xf6, 0x67, 0xf1, 0x04, 0xe1, 0xcb, 0x87, 0x32, 0x5b, 0xa0, 0xba, 0xbb, 0x9a, 0xea, 0x1f, 0x49,
0xe4, 0xda, 0x8a, 0xca, 0x53, 0x43, 0x87, 0xbe, 0x88, 0x43, 0x27, 0xb9, 0xe7, 0xf1, 0x86, 0xc9,
0xa0, 0x75, 0x6a, 0x55, 0xf0, 0x19, 0x7d, 0x46, 0xf8, 0xea, 0x54, 0x95, 0x2b, 0xc9, 0x46, 0x9b,
0xcb, 0x27, 0x96, 0x3d, 0xa8, 0x93, 0x3a, 0x40, 0x47, 0xfb, 0xa1, 0x83, 0x50, 0x05, 0x97, 0x82,
0x2a, 0x23, 0x98, 0x00, 0xe9, 0x73, 0x64, 0xcd, 0x12, 0xd7, 0xb9, 0xfd, 0xff, 0xfc, 0x7d, 0x10,
0x1e, 0x27, 0x17, 0xbd, 0x66, 0xf8, 0x27, 0x00, 0x00, 0xff, 0xff, 0x50, 0x92, 0x91, 0x86, 0x71,
0x06, 0x00, 0x00,
}

@@ -3,28 +3,59 @@

package cluster

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// Request message for BigtableClusterService.ListZones.
type ListZonesRequest struct {
// The unique name of the project for which a list of supported zones is
// requested.
// Values are of the form projects/<project>
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *ListZonesRequest) Reset() { *m = ListZonesRequest{} }
func (m *ListZonesRequest) String() string { return proto.CompactTextString(m) }
func (*ListZonesRequest) ProtoMessage() {}
func (*ListZonesRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
func (m *ListZonesRequest) Reset() { *m = ListZonesRequest{} }
func (m *ListZonesRequest) String() string { return proto.CompactTextString(m) }
func (*ListZonesRequest) ProtoMessage() {}
func (*ListZonesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{0}
}

func (m *ListZonesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListZonesRequest.Unmarshal(m, b)
}
func (m *ListZonesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListZonesRequest.Marshal(b, m, deterministic)
}
func (m *ListZonesRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListZonesRequest.Merge(m, src)
}
func (m *ListZonesRequest) XXX_Size() int {
return xxx_messageInfo_ListZonesRequest.Size(m)
}
func (m *ListZonesRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListZonesRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ListZonesRequest proto.InternalMessageInfo

func (m *ListZonesRequest) GetName() string {
if m != nil {
@@ -36,13 +67,36 @@ func (m *ListZonesRequest) GetName() string {
// Response message for BigtableClusterService.ListZones.
type ListZonesResponse struct {
// The list of requested zones.
Zones []*Zone `protobuf:"bytes,1,rep,name=zones" json:"zones,omitempty"`
Zones []*Zone `protobuf:"bytes,1,rep,name=zones,proto3" json:"zones,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *ListZonesResponse) Reset() { *m = ListZonesResponse{} }
func (m *ListZonesResponse) String() string { return proto.CompactTextString(m) }
func (*ListZonesResponse) ProtoMessage() {}
func (*ListZonesResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
func (m *ListZonesResponse) Reset() { *m = ListZonesResponse{} }
func (m *ListZonesResponse) String() string { return proto.CompactTextString(m) }
func (*ListZonesResponse) ProtoMessage() {}
func (*ListZonesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{1}
}

func (m *ListZonesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListZonesResponse.Unmarshal(m, b)
}
func (m *ListZonesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListZonesResponse.Marshal(b, m, deterministic)
}
func (m *ListZonesResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListZonesResponse.Merge(m, src)
}
func (m *ListZonesResponse) XXX_Size() int {
return xxx_messageInfo_ListZonesResponse.Size(m)
}
func (m *ListZonesResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListZonesResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ListZonesResponse proto.InternalMessageInfo

func (m *ListZonesResponse) GetZones() []*Zone {
if m != nil {
@@ -55,13 +109,36 @@ func (m *ListZonesResponse) GetZones() []*Zone {
type GetClusterRequest struct {
// The unique name of the requested cluster.
// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} }
func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) }
func (*GetClusterRequest) ProtoMessage() {}
func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} }
func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} }
func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) }
func (*GetClusterRequest) ProtoMessage() {}
func (*GetClusterRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{2}
}

func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetClusterRequest.Unmarshal(m, b)
}
func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetClusterRequest.Marshal(b, m, deterministic)
}
func (m *GetClusterRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetClusterRequest.Merge(m, src)
}
func (m *GetClusterRequest) XXX_Size() int {
return xxx_messageInfo_GetClusterRequest.Size(m)
}
func (m *GetClusterRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetClusterRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetClusterRequest proto.InternalMessageInfo

func (m *GetClusterRequest) GetName() string {
if m != nil {
@@ -74,13 +151,36 @@ func (m *GetClusterRequest) GetName() string {
type ListClustersRequest struct {
// The unique name of the project for which a list of clusters is requested.
// Values are of the form projects/<project>
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} }
func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) }
func (*ListClustersRequest) ProtoMessage() {}
func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} }
func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} }
func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) }
func (*ListClustersRequest) ProtoMessage() {}
func (*ListClustersRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{3}
}

func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListClustersRequest.Unmarshal(m, b)
}
func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListClustersRequest.Marshal(b, m, deterministic)
}
func (m *ListClustersRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListClustersRequest.Merge(m, src)
}
func (m *ListClustersRequest) XXX_Size() int {
return xxx_messageInfo_ListClustersRequest.Size(m)
}
func (m *ListClustersRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListClustersRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ListClustersRequest proto.InternalMessageInfo

func (m *ListClustersRequest) GetName() string {
if m != nil {
@@ -92,15 +192,38 @@ func (m *ListClustersRequest) GetName() string {
// Response message for BigtableClusterService.ListClusters.
type ListClustersResponse struct {
// The list of requested Clusters.
Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"`
Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"`
// The zones for which clusters could not be retrieved.
FailedZones []*Zone `protobuf:"bytes,2,rep,name=failed_zones,json=failedZones" json:"failed_zones,omitempty"`
FailedZones []*Zone `protobuf:"bytes,2,rep,name=failed_zones,json=failedZones,proto3" json:"failed_zones,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} }
func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) }
func (*ListClustersResponse) ProtoMessage() {}
func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} }
func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} }
func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) }
func (*ListClustersResponse) ProtoMessage() {}
func (*ListClustersResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{4}
}

func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListClustersResponse.Unmarshal(m, b)
}
func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListClustersResponse.Marshal(b, m, deterministic)
}
func (m *ListClustersResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListClustersResponse.Merge(m, src)
}
func (m *ListClustersResponse) XXX_Size() int {
return xxx_messageInfo_ListClustersResponse.Size(m)
}
func (m *ListClustersResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListClustersResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ListClustersResponse proto.InternalMessageInfo

func (m *ListClustersResponse) GetClusters() []*Cluster {
if m != nil {
@@ -120,21 +243,44 @@ func (m *ListClustersResponse) GetFailedZones() []*Zone {
type CreateClusterRequest struct {
// The unique name of the zone in which to create the cluster.
// Values are of the form projects/<project>/zones/<zone>
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// The id to be used when referring to the new cluster within its zone,
// e.g. just the "test-cluster" section of the full name
// "projects/<project>/zones/<zone>/clusters/test-cluster".
ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"`
ClusterId string `protobuf:"bytes,2,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
// The cluster to create.
// The "name", "delete_time", and "current_operation" fields must be left
// blank.
Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"`
Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} }
func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) }
func (*CreateClusterRequest) ProtoMessage() {}
func (*CreateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} }
func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} }
func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) }
func (*CreateClusterRequest) ProtoMessage() {}
func (*CreateClusterRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{5}
}

func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateClusterRequest.Unmarshal(m, b)
}
func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateClusterRequest.Marshal(b, m, deterministic)
}
func (m *CreateClusterRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateClusterRequest.Merge(m, src)
}
func (m *CreateClusterRequest) XXX_Size() int {
return xxx_messageInfo_CreateClusterRequest.Size(m)
}
func (m *CreateClusterRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateClusterRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CreateClusterRequest proto.InternalMessageInfo

func (m *CreateClusterRequest) GetName() string {
if m != nil {
@@ -161,17 +307,40 @@ func (m *CreateClusterRequest) GetCluster() *Cluster {
// BigtableClusterService.CreateCluster.
type CreateClusterMetadata struct {
// The request which prompted the creation of this operation.
OriginalRequest *CreateClusterRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"`
OriginalRequest *CreateClusterRequest `protobuf:"bytes,1,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
// The time at which original_request was received.
RequestTime *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime" json:"request_time,omitempty"`
RequestTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"`
// The time at which this operation failed or was completed successfully.
FinishTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"`
FinishTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} }
func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) }
func (*CreateClusterMetadata) ProtoMessage() {}
func (*CreateClusterMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} }
func (m *CreateClusterMetadata) Reset() { *m = CreateClusterMetadata{} }
func (m *CreateClusterMetadata) String() string { return proto.CompactTextString(m) }
func (*CreateClusterMetadata) ProtoMessage() {}
func (*CreateClusterMetadata) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{6}
}

func (m *CreateClusterMetadata) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateClusterMetadata.Unmarshal(m, b)
}
func (m *CreateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateClusterMetadata.Marshal(b, m, deterministic)
}
func (m *CreateClusterMetadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateClusterMetadata.Merge(m, src)
}
func (m *CreateClusterMetadata) XXX_Size() int {
return xxx_messageInfo_CreateClusterMetadata.Size(m)
}
func (m *CreateClusterMetadata) XXX_DiscardUnknown() {
xxx_messageInfo_CreateClusterMetadata.DiscardUnknown(m)
}

var xxx_messageInfo_CreateClusterMetadata proto.InternalMessageInfo

func (m *CreateClusterMetadata) GetOriginalRequest() *CreateClusterRequest {
if m != nil {
@@ -180,14 +349,14 @@ func (m *CreateClusterMetadata) GetOriginalRequest() *CreateClusterRequest {
return nil
}

func (m *CreateClusterMetadata) GetRequestTime() *google_protobuf3.Timestamp {
func (m *CreateClusterMetadata) GetRequestTime() *timestamp.Timestamp {
if m != nil {
return m.RequestTime
}
return nil
}

func (m *CreateClusterMetadata) GetFinishTime() *google_protobuf3.Timestamp {
func (m *CreateClusterMetadata) GetFinishTime() *timestamp.Timestamp {
if m != nil {
return m.FinishTime
}
@@ -198,21 +367,44 @@ func (m *CreateClusterMetadata) GetFinishTime() *google_protobuf3.Timestamp {
// BigtableClusterService.UpdateCluster.
type UpdateClusterMetadata struct {
// The request which prompted the creation of this operation.
OriginalRequest *Cluster `protobuf:"bytes,1,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"`
OriginalRequest *Cluster `protobuf:"bytes,1,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
// The time at which original_request was received.
RequestTime *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime" json:"request_time,omitempty"`
RequestTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"`
// The time at which this operation was cancelled. If set, this operation is
// in the process of undoing itself (which is guaranteed to succeed) and
// cannot be cancelled again.
CancelTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime" json:"cancel_time,omitempty"`
CancelTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=cancel_time,json=cancelTime,proto3" json:"cancel_time,omitempty"`
// The time at which this operation failed or was completed successfully.
FinishTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"`
FinishTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} }
func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) }
func (*UpdateClusterMetadata) ProtoMessage() {}
func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} }
func (m *UpdateClusterMetadata) Reset() { *m = UpdateClusterMetadata{} }
func (m *UpdateClusterMetadata) String() string { return proto.CompactTextString(m) }
func (*UpdateClusterMetadata) ProtoMessage() {}
func (*UpdateClusterMetadata) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{7}
}

func (m *UpdateClusterMetadata) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UpdateClusterMetadata.Unmarshal(m, b)
}
func (m *UpdateClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UpdateClusterMetadata.Marshal(b, m, deterministic)
}
func (m *UpdateClusterMetadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_UpdateClusterMetadata.Merge(m, src)
}
func (m *UpdateClusterMetadata) XXX_Size() int {
return xxx_messageInfo_UpdateClusterMetadata.Size(m)
}
func (m *UpdateClusterMetadata) XXX_DiscardUnknown() {
xxx_messageInfo_UpdateClusterMetadata.DiscardUnknown(m)
}

var xxx_messageInfo_UpdateClusterMetadata proto.InternalMessageInfo

func (m *UpdateClusterMetadata) GetOriginalRequest() *Cluster {
if m != nil {
@@ -221,21 +413,21 @@ func (m *UpdateClusterMetadata) GetOriginalRequest() *Cluster {
return nil
}

func (m *UpdateClusterMetadata) GetRequestTime() *google_protobuf3.Timestamp {
func (m *UpdateClusterMetadata) GetRequestTime() *timestamp.Timestamp {
if m != nil {
return m.RequestTime
}
return nil
}

func (m *UpdateClusterMetadata) GetCancelTime() *google_protobuf3.Timestamp {
func (m *UpdateClusterMetadata) GetCancelTime() *timestamp.Timestamp {
if m != nil {
return m.CancelTime
}
return nil
}

func (m *UpdateClusterMetadata) GetFinishTime() *google_protobuf3.Timestamp {
func (m *UpdateClusterMetadata) GetFinishTime() *timestamp.Timestamp {
if m != nil {
return m.FinishTime
}
@@ -246,13 +438,36 @@ func (m *UpdateClusterMetadata) GetFinishTime() *google_protobuf3.Timestamp {
type DeleteClusterRequest struct {
// The unique name of the cluster to be deleted.
// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} }
func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteClusterRequest) ProtoMessage() {}
func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} }
func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} }
func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteClusterRequest) ProtoMessage() {}
func (*DeleteClusterRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{8}
}

func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteClusterRequest.Unmarshal(m, b)
}
func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DeleteClusterRequest.Marshal(b, m, deterministic)
}
func (m *DeleteClusterRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeleteClusterRequest.Merge(m, src)
}
func (m *DeleteClusterRequest) XXX_Size() int {
return xxx_messageInfo_DeleteClusterRequest.Size(m)
}
func (m *DeleteClusterRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DeleteClusterRequest.DiscardUnknown(m)
}

var xxx_messageInfo_DeleteClusterRequest proto.InternalMessageInfo

func (m *DeleteClusterRequest) GetName() string {
if m != nil {
@@ -265,13 +480,36 @@ func (m *DeleteClusterRequest) GetName() string {
type UndeleteClusterRequest struct {
// The unique name of the cluster to be un-deleted.
// Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *UndeleteClusterRequest) Reset() { *m = UndeleteClusterRequest{} }
func (m *UndeleteClusterRequest) String() string { return proto.CompactTextString(m) }
func (*UndeleteClusterRequest) ProtoMessage() {}
func (*UndeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} }
func (m *UndeleteClusterRequest) Reset() { *m = UndeleteClusterRequest{} }
func (m *UndeleteClusterRequest) String() string { return proto.CompactTextString(m) }
func (*UndeleteClusterRequest) ProtoMessage() {}
func (*UndeleteClusterRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{9}
}

func (m *UndeleteClusterRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UndeleteClusterRequest.Unmarshal(m, b)
}
func (m *UndeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UndeleteClusterRequest.Marshal(b, m, deterministic)
}
func (m *UndeleteClusterRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_UndeleteClusterRequest.Merge(m, src)
}
func (m *UndeleteClusterRequest) XXX_Size() int {
return xxx_messageInfo_UndeleteClusterRequest.Size(m)
}
func (m *UndeleteClusterRequest) XXX_DiscardUnknown() {
xxx_messageInfo_UndeleteClusterRequest.DiscardUnknown(m)
}

var xxx_messageInfo_UndeleteClusterRequest proto.InternalMessageInfo

func (m *UndeleteClusterRequest) GetName() string {
if m != nil {
@@ -284,24 +522,47 @@ func (m *UndeleteClusterRequest) GetName() string {
// BigtableClusterService.UndeleteCluster.
type UndeleteClusterMetadata struct {
// The time at which the original request was received.
RequestTime *google_protobuf3.Timestamp `protobuf:"bytes,1,opt,name=request_time,json=requestTime" json:"request_time,omitempty"`
RequestTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"`
// The time at which this operation failed or was completed successfully.
FinishTime *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=finish_time,json=finishTime" json:"finish_time,omitempty"`
FinishTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=finish_time,json=finishTime,proto3" json:"finish_time,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *UndeleteClusterMetadata) Reset() { *m = UndeleteClusterMetadata{} }
func (m *UndeleteClusterMetadata) String() string { return proto.CompactTextString(m) }
func (*UndeleteClusterMetadata) ProtoMessage() {}
func (*UndeleteClusterMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{10} }
func (m *UndeleteClusterMetadata) Reset() { *m = UndeleteClusterMetadata{} }
func (m *UndeleteClusterMetadata) String() string { return proto.CompactTextString(m) }
func (*UndeleteClusterMetadata) ProtoMessage() {}
func (*UndeleteClusterMetadata) Descriptor() ([]byte, []int) {
return fileDescriptor_2a8715cfb8408734, []int{10}
}

func (m *UndeleteClusterMetadata) GetRequestTime() *google_protobuf3.Timestamp {
func (m *UndeleteClusterMetadata) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UndeleteClusterMetadata.Unmarshal(m, b)
}
func (m *UndeleteClusterMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UndeleteClusterMetadata.Marshal(b, m, deterministic)
}
func (m *UndeleteClusterMetadata) XXX_Merge(src proto.Message) {
xxx_messageInfo_UndeleteClusterMetadata.Merge(m, src)
}
func (m *UndeleteClusterMetadata) XXX_Size() int {
return xxx_messageInfo_UndeleteClusterMetadata.Size(m)
}
func (m *UndeleteClusterMetadata) XXX_DiscardUnknown() {
xxx_messageInfo_UndeleteClusterMetadata.DiscardUnknown(m)
}

var xxx_messageInfo_UndeleteClusterMetadata proto.InternalMessageInfo

func (m *UndeleteClusterMetadata) GetRequestTime() *timestamp.Timestamp {
if m != nil {
return m.RequestTime
}
return nil
}

func (m *UndeleteClusterMetadata) GetFinishTime() *google_protobuf3.Timestamp {
|
||||
func (m *UndeleteClusterMetadata) GetFinishTime() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.FinishTime
|
||||
}
|
||||
@@ -311,12 +572,35 @@ func (m *UndeleteClusterMetadata) GetFinishTime() *google_protobuf3.Timestamp {
// Metadata type for operations initiated by the V2 BigtableAdmin service.
// More complete information for such operations is available via the V2 API.
type V2OperationMetadata struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *V2OperationMetadata) Reset() { *m = V2OperationMetadata{} }
func (m *V2OperationMetadata) String() string { return proto.CompactTextString(m) }
func (*V2OperationMetadata) ProtoMessage() {}
func (*V2OperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{11} }
func (m *V2OperationMetadata) Reset() { *m = V2OperationMetadata{} }
func (m *V2OperationMetadata) String() string { return proto.CompactTextString(m) }
func (*V2OperationMetadata) ProtoMessage() {}
func (*V2OperationMetadata) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a8715cfb8408734, []int{11}
}

func (m *V2OperationMetadata) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_V2OperationMetadata.Unmarshal(m, b)
}
func (m *V2OperationMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_V2OperationMetadata.Marshal(b, m, deterministic)
}
func (m *V2OperationMetadata) XXX_Merge(src proto.Message) {
	xxx_messageInfo_V2OperationMetadata.Merge(m, src)
}
func (m *V2OperationMetadata) XXX_Size() int {
	return xxx_messageInfo_V2OperationMetadata.Size(m)
}
func (m *V2OperationMetadata) XXX_DiscardUnknown() {
	xxx_messageInfo_V2OperationMetadata.DiscardUnknown(m)
}

var xxx_messageInfo_V2OperationMetadata proto.InternalMessageInfo

func init() {
|
||||
proto.RegisterType((*ListZonesRequest)(nil), "google.bigtable.admin.cluster.v1.ListZonesRequest")
|
||||
@@ -334,10 +618,10 @@ func init() {
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto", fileDescriptor2)
|
||||
proto.RegisterFile("google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto", fileDescriptor_2a8715cfb8408734)
|
||||
}
|
||||
|
||||
var fileDescriptor2 = []byte{
|
||||
var fileDescriptor_2a8715cfb8408734 = []byte{
|
||||
// 541 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x4d, 0x6f, 0xd3, 0x40,
|
||||
0x10, 0xd5, 0x26, 0xe5, 0xa3, 0xe3, 0x4a, 0xb4, 0x6e, 0x02, 0x51, 0x24, 0x44, 0x64, 0x50, 0x69,
|
||||
|
@@ -1,35 +1,15 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/bigtable/admin/table/v1/bigtable_table_data.proto

/*
Package table is a generated protocol buffer package.

It is generated from these files:
	google/bigtable/admin/table/v1/bigtable_table_data.proto
	google/bigtable/admin/table/v1/bigtable_table_service.proto
	google/bigtable/admin/table/v1/bigtable_table_service_messages.proto

It has these top-level messages:
	Table
	ColumnFamily
	GcRule
	CreateTableRequest
	ListTablesRequest
	ListTablesResponse
	GetTableRequest
	DeleteTableRequest
	RenameTableRequest
	CreateColumnFamilyRequest
	DeleteColumnFamilyRequest
	BulkDeleteRowsRequest
*/
package table

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_longrunning "google.golang.org/genproto/googleapis/longrunning"
import google_protobuf3 "github.com/golang/protobuf/ptypes/duration"
import (
	fmt "fmt"
	proto "github.com/golang/protobuf/proto"
	duration "github.com/golang/protobuf/ptypes/duration"
	longrunning "google.golang.org/genproto/googleapis/longrunning"
	math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -51,6 +31,7 @@ const (
|
||||
var Table_TimestampGranularity_name = map[int32]string{
|
||||
0: "MILLIS",
|
||||
}
|
||||
|
||||
var Table_TimestampGranularity_value = map[string]int32{
|
||||
"MILLIS": 0,
|
||||
}
|
||||
@@ -58,8 +39,9 @@ var Table_TimestampGranularity_value = map[string]int32{
|
||||
func (x Table_TimestampGranularity) String() string {
|
||||
return proto.EnumName(Table_TimestampGranularity_name, int32(x))
|
||||
}
|
||||
|
||||
func (Table_TimestampGranularity) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor0, []int{0, 0}
|
||||
return fileDescriptor_b9ed6c4c9bdaf892, []int{0, 0}
|
||||
}
|
||||
|
||||
// A collection of user data indexed by row, column, and timestamp.
|
||||
@@ -67,23 +49,46 @@ func (Table_TimestampGranularity) EnumDescriptor() ([]byte, []int) {
|
||||
type Table struct {
|
||||
// A unique identifier of the form
|
||||
// <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// If this Table is in the process of being created, the Operation used to
|
||||
// track its progress. As long as this operation is present, the Table will
|
||||
// not accept any Table Admin or Read/Write requests.
|
||||
CurrentOperation *google_longrunning.Operation `protobuf:"bytes,2,opt,name=current_operation,json=currentOperation" json:"current_operation,omitempty"`
|
||||
CurrentOperation *longrunning.Operation `protobuf:"bytes,2,opt,name=current_operation,json=currentOperation,proto3" json:"current_operation,omitempty"`
|
||||
// The column families configured for this table, mapped by column family id.
|
||||
ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families,json=columnFamilies" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families,json=columnFamilies,proto3" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in
|
||||
// this table. Timestamps not matching the granularity will be rejected.
|
||||
// Cannot be changed once the table is created.
|
||||
Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,enum=google.bigtable.admin.table.v1.Table_TimestampGranularity" json:"granularity,omitempty"`
|
||||
Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,proto3,enum=google.bigtable.admin.table.v1.Table_TimestampGranularity" json:"granularity,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Table) Reset() { *m = Table{} }
|
||||
func (m *Table) String() string { return proto.CompactTextString(m) }
|
||||
func (*Table) ProtoMessage() {}
|
||||
func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
func (m *Table) Reset() { *m = Table{} }
|
||||
func (m *Table) String() string { return proto.CompactTextString(m) }
|
||||
func (*Table) ProtoMessage() {}
|
||||
func (*Table) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b9ed6c4c9bdaf892, []int{0}
|
||||
}
|
||||
|
||||
func (m *Table) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Table.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Table) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Table.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Table) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Table.Merge(m, src)
|
||||
}
|
||||
func (m *Table) XXX_Size() int {
|
||||
return xxx_messageInfo_Table.Size(m)
|
||||
}
|
||||
func (m *Table) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Table.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Table proto.InternalMessageInfo
|
||||
|
||||
func (m *Table) GetName() string {
|
||||
if m != nil {
|
||||
@@ -92,7 +97,7 @@ func (m *Table) GetName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Table) GetCurrentOperation() *google_longrunning.Operation {
|
||||
func (m *Table) GetCurrentOperation() *longrunning.Operation {
|
||||
if m != nil {
|
||||
return m.CurrentOperation
|
||||
}
|
||||
@@ -118,7 +123,7 @@ type ColumnFamily struct {
	// A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+
	// The last segment is the same as the "name" field in
	// google.bigtable.v1.Family.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Garbage collection expression specified by the following grammar:
	//   GC = EXPR
	//      | "" ;
@@ -144,7 +149,7 @@ type ColumnFamily struct {
	// Garbage collection executes opportunistically in the background, and so
	// it's possible for reads to return a cell even if it matches the active GC
	// expression for its family.
	GcExpression string `protobuf:"bytes,2,opt,name=gc_expression,json=gcExpression" json:"gc_expression,omitempty"`
	GcExpression string `protobuf:"bytes,2,opt,name=gc_expression,json=gcExpression,proto3" json:"gc_expression,omitempty"`
	// Garbage collection rule specified as a protobuf.
	// Supersedes `gc_expression`.
	// Must serialize to at most 500 bytes.
@@ -152,13 +157,36 @@ type ColumnFamily struct {
	// NOTE: Garbage collection executes opportunistically in the background, and
	// so it's possible for reads to return a cell even if it matches the active
	// GC expression for its family.
	GcRule *GcRule `protobuf:"bytes,3,opt,name=gc_rule,json=gcRule" json:"gc_rule,omitempty"`
	GcRule *GcRule `protobuf:"bytes,3,opt,name=gc_rule,json=gcRule,proto3" json:"gc_rule,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

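// Editor's note: the following is an illustrative sketch, not part of the
// generated code. It shows how the gc_expression grammar documented on
// ColumnFamily might be filled in; the family name and the 5-version/7-day
// policy are assumptions chosen for the example.
//
//	cf := &ColumnFamily{
//		Name:         tableName + "/columnFamilies/cf1", // tableName is hypothetical
//		GcExpression: "version() > 5 || age() > 7d",
//	}
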
func (m *ColumnFamily) Reset() { *m = ColumnFamily{} }
|
||||
func (m *ColumnFamily) String() string { return proto.CompactTextString(m) }
|
||||
func (*ColumnFamily) ProtoMessage() {}
|
||||
func (*ColumnFamily) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||
func (m *ColumnFamily) Reset() { *m = ColumnFamily{} }
|
||||
func (m *ColumnFamily) String() string { return proto.CompactTextString(m) }
|
||||
func (*ColumnFamily) ProtoMessage() {}
|
||||
func (*ColumnFamily) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b9ed6c4c9bdaf892, []int{1}
|
||||
}
|
||||
|
||||
func (m *ColumnFamily) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ColumnFamily.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ColumnFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ColumnFamily.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ColumnFamily) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ColumnFamily.Merge(m, src)
|
||||
}
|
||||
func (m *ColumnFamily) XXX_Size() int {
|
||||
return xxx_messageInfo_ColumnFamily.Size(m)
|
||||
}
|
||||
func (m *ColumnFamily) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ColumnFamily.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ColumnFamily proto.InternalMessageInfo
|
||||
|
||||
func (m *ColumnFamily) GetName() string {
|
||||
if m != nil {
|
||||
@@ -188,35 +216,64 @@ type GcRule struct {
|
||||
// *GcRule_MaxAge
|
||||
// *GcRule_Intersection_
|
||||
// *GcRule_Union_
|
||||
Rule isGcRule_Rule `protobuf_oneof:"rule"`
|
||||
Rule isGcRule_Rule `protobuf_oneof:"rule"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GcRule) Reset() { *m = GcRule{} }
|
||||
func (m *GcRule) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule) ProtoMessage() {}
|
||||
func (*GcRule) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||
func (m *GcRule) Reset() { *m = GcRule{} }
|
||||
func (m *GcRule) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule) ProtoMessage() {}
|
||||
func (*GcRule) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b9ed6c4c9bdaf892, []int{2}
|
||||
}
|
||||
|
||||
func (m *GcRule) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GcRule.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GcRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GcRule.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *GcRule) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GcRule.Merge(m, src)
|
||||
}
|
||||
func (m *GcRule) XXX_Size() int {
|
||||
return xxx_messageInfo_GcRule.Size(m)
|
||||
}
|
||||
func (m *GcRule) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GcRule.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GcRule proto.InternalMessageInfo
|
||||
|
||||
type isGcRule_Rule interface {
	isGcRule_Rule()
}

type GcRule_MaxNumVersions struct {
	MaxNumVersions int32 `protobuf:"varint,1,opt,name=max_num_versions,json=maxNumVersions,oneof"`
	MaxNumVersions int32 `protobuf:"varint,1,opt,name=max_num_versions,json=maxNumVersions,proto3,oneof"`
}

type GcRule_MaxAge struct {
	MaxAge *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=max_age,json=maxAge,oneof"`
	MaxAge *duration.Duration `protobuf:"bytes,2,opt,name=max_age,json=maxAge,proto3,oneof"`
}

type GcRule_Intersection_ struct {
	Intersection *GcRule_Intersection `protobuf:"bytes,3,opt,name=intersection,oneof"`
	Intersection *GcRule_Intersection `protobuf:"bytes,3,opt,name=intersection,proto3,oneof"`
}

type GcRule_Union_ struct {
	Union *GcRule_Union `protobuf:"bytes,4,opt,name=union,oneof"`
	Union *GcRule_Union `protobuf:"bytes,4,opt,name=union,proto3,oneof"`
}

func (*GcRule_MaxNumVersions) isGcRule_Rule() {}
func (*GcRule_MaxAge) isGcRule_Rule() {}
func (*GcRule_Intersection_) isGcRule_Rule() {}
func (*GcRule_Union_) isGcRule_Rule() {}

func (*GcRule_MaxAge) isGcRule_Rule() {}

func (*GcRule_Intersection_) isGcRule_Rule() {}

func (*GcRule_Union_) isGcRule_Rule() {}

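// Editor's note: an illustrative sketch, not generated output. It shows how the
// oneof wrapper types above are populated; the "time" and
// "github.com/golang/protobuf/ptypes" imports and the 30-day retention value are
// assumptions.
//
//	keepFive := &GcRule{Rule: &GcRule_MaxNumVersions{MaxNumVersions: 5}}
//	keepYoung := &GcRule{Rule: &GcRule_MaxAge{MaxAge: ptypes.DurationProto(30 * 24 * time.Hour)}}
//	both := &GcRule{Rule: &GcRule_Intersection_{
//		Intersection: &GcRule_Intersection{Rules: []*GcRule{keepFive, keepYoung}},
//	}}
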
func (m *GcRule) GetRule() isGcRule_Rule {
|
||||
if m != nil {
|
||||
@@ -232,7 +289,7 @@ func (m *GcRule) GetMaxNumVersions() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *GcRule) GetMaxAge() *google_protobuf3.Duration {
|
||||
func (m *GcRule) GetMaxAge() *duration.Duration {
|
||||
if x, ok := m.GetRule().(*GcRule_MaxAge); ok {
|
||||
return x.MaxAge
|
||||
}
|
||||
@@ -306,7 +363,7 @@ func _GcRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer)
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(google_protobuf3.Duration)
|
||||
msg := new(duration.Duration)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Rule = &GcRule_MaxAge{msg}
|
||||
return true, err
|
||||
@@ -336,21 +393,21 @@ func _GcRule_OneofSizer(msg proto.Message) (n int) {
|
||||
// rule
|
||||
switch x := m.Rule.(type) {
|
||||
case *GcRule_MaxNumVersions:
|
||||
n += proto.SizeVarint(1<<3 | proto.WireVarint)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(x.MaxNumVersions))
|
||||
case *GcRule_MaxAge:
|
||||
s := proto.Size(x.MaxAge)
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *GcRule_Intersection_:
|
||||
s := proto.Size(x.Intersection)
|
||||
n += proto.SizeVarint(3<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *GcRule_Union_:
|
||||
s := proto.Size(x.Union)
|
||||
n += proto.SizeVarint(4<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
@@ -363,13 +420,36 @@ func _GcRule_OneofSizer(msg proto.Message) (n int) {
|
||||
// A GcRule which deletes cells matching all of the given rules.
|
||||
type GcRule_Intersection struct {
|
||||
// Only delete cells which would be deleted by every element of `rules`.
|
||||
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
|
||||
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GcRule_Intersection) Reset() { *m = GcRule_Intersection{} }
|
||||
func (m *GcRule_Intersection) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule_Intersection) ProtoMessage() {}
|
||||
func (*GcRule_Intersection) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
|
||||
func (m *GcRule_Intersection) Reset() { *m = GcRule_Intersection{} }
|
||||
func (m *GcRule_Intersection) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule_Intersection) ProtoMessage() {}
|
||||
func (*GcRule_Intersection) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b9ed6c4c9bdaf892, []int{2, 0}
|
||||
}
|
||||
|
||||
func (m *GcRule_Intersection) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GcRule_Intersection.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GcRule_Intersection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GcRule_Intersection.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *GcRule_Intersection) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GcRule_Intersection.Merge(m, src)
|
||||
}
|
||||
func (m *GcRule_Intersection) XXX_Size() int {
|
||||
return xxx_messageInfo_GcRule_Intersection.Size(m)
|
||||
}
|
||||
func (m *GcRule_Intersection) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GcRule_Intersection.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GcRule_Intersection proto.InternalMessageInfo
|
||||
|
||||
func (m *GcRule_Intersection) GetRules() []*GcRule {
|
||||
if m != nil {
|
||||
@@ -381,13 +461,36 @@ func (m *GcRule_Intersection) GetRules() []*GcRule {
|
||||
// A GcRule which deletes cells matching any of the given rules.
|
||||
type GcRule_Union struct {
|
||||
// Delete cells which would be deleted by any element of `rules`.
|
||||
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
|
||||
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GcRule_Union) Reset() { *m = GcRule_Union{} }
|
||||
func (m *GcRule_Union) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule_Union) ProtoMessage() {}
|
||||
func (*GcRule_Union) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} }
|
||||
func (m *GcRule_Union) Reset() { *m = GcRule_Union{} }
|
||||
func (m *GcRule_Union) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule_Union) ProtoMessage() {}
|
||||
func (*GcRule_Union) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b9ed6c4c9bdaf892, []int{2, 1}
|
||||
}
|
||||
|
||||
func (m *GcRule_Union) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GcRule_Union.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GcRule_Union) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GcRule_Union.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *GcRule_Union) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GcRule_Union.Merge(m, src)
|
||||
}
|
||||
func (m *GcRule_Union) XXX_Size() int {
|
||||
return xxx_messageInfo_GcRule_Union.Size(m)
|
||||
}
|
||||
func (m *GcRule_Union) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GcRule_Union.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GcRule_Union proto.InternalMessageInfo
|
||||
|
||||
func (m *GcRule_Union) GetRules() []*GcRule {
|
||||
if m != nil {
|
||||
@@ -397,19 +500,20 @@ func (m *GcRule_Union) GetRules() []*GcRule {
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("google.bigtable.admin.table.v1.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value)
|
||||
proto.RegisterType((*Table)(nil), "google.bigtable.admin.table.v1.Table")
|
||||
proto.RegisterMapType((map[string]*ColumnFamily)(nil), "google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry")
|
||||
proto.RegisterType((*ColumnFamily)(nil), "google.bigtable.admin.table.v1.ColumnFamily")
|
||||
proto.RegisterType((*GcRule)(nil), "google.bigtable.admin.table.v1.GcRule")
|
||||
proto.RegisterType((*GcRule_Intersection)(nil), "google.bigtable.admin.table.v1.GcRule.Intersection")
|
||||
proto.RegisterType((*GcRule_Union)(nil), "google.bigtable.admin.table.v1.GcRule.Union")
|
||||
proto.RegisterEnum("google.bigtable.admin.table.v1.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/bigtable/admin/table/v1/bigtable_table_data.proto", fileDescriptor0)
|
||||
proto.RegisterFile("google/bigtable/admin/table/v1/bigtable_table_data.proto", fileDescriptor_b9ed6c4c9bdaf892)
|
||||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
var fileDescriptor_b9ed6c4c9bdaf892 = []byte{
|
||||
// 579 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0x61, 0x6b, 0xd3, 0x40,
|
||||
0x18, 0xc7, 0x9b, 0xa5, 0xed, 0xd8, 0xb3, 0x3a, 0xeb, 0x29, 0x52, 0x0b, 0x4a, 0xc9, 0x40, 0x8a,
|
||||
|
@@ -3,15 +3,14 @@
|
||||
|
||||
package table
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
empty "github.com/golang/protobuf/ptypes/empty"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
grpc "google.golang.org/grpc"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@@ -19,6 +18,55 @@ var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/bigtable/admin/table/v1/bigtable_table_service.proto", fileDescriptor_3185ad3e7140bd14)
|
||||
}
|
||||
|
||||
var fileDescriptor_3185ad3e7140bd14 = []byte{
|
||||
// 560 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0xbf, 0x6f, 0xd4, 0x30,
|
||||
0x14, 0xc7, 0x65, 0x06, 0x84, 0x7c, 0x88, 0xc1, 0x42, 0x0c, 0x07, 0x62, 0x88, 0xc4, 0x12, 0xa1,
|
||||
0x58, 0xb9, 0x82, 0x68, 0xaf, 0xaa, 0x84, 0x72, 0xd7, 0x56, 0xa2, 0x20, 0x95, 0x03, 0x16, 0x96,
|
||||
0xca, 0xc9, 0x3d, 0xa2, 0x40, 0x62, 0x87, 0xd8, 0x39, 0x54, 0x50, 0x17, 0x16, 0xfe, 0x00, 0x58,
|
||||
0x61, 0x62, 0x64, 0x82, 0x1d, 0x76, 0x56, 0xfe, 0x05, 0xfe, 0x10, 0x14, 0x3b, 0xa1, 0x69, 0xf9,
|
||||
0xe1, 0x73, 0xd5, 0x25, 0xe7, 0xb3, 0xbf, 0xdf, 0xf7, 0x3e, 0xcf, 0x7e, 0x96, 0xf1, 0x7a, 0x2a,
|
||||
0x44, 0x9a, 0x03, 0x8d, 0xb3, 0x54, 0xb1, 0x38, 0x07, 0xca, 0xe6, 0x45, 0xc6, 0xa9, 0x19, 0x2f,
|
||||
0xc2, 0xdf, 0xf3, 0x7b, 0xe6, 0x2b, 0xa1, 0x5a, 0x64, 0x09, 0x04, 0x65, 0x25, 0x94, 0x20, 0x57,
|
||||
0x8d, 0x39, 0xe8, 0x44, 0x81, 0x36, 0x07, 0x66, 0xbc, 0x08, 0x87, 0x57, 0xda, 0xe0, 0xac, 0xcc,
|
||||
0x28, 0xe3, 0x5c, 0x28, 0xa6, 0x32, 0xc1, 0xa5, 0x71, 0x0f, 0x57, 0xdd, 0x52, 0xcf, 0x99, 0x62,
|
||||
0xad, 0x73, 0x7a, 0x22, 0xe8, 0xbd, 0x02, 0xa4, 0x64, 0x29, 0x74, 0xf9, 0x2f, 0xb7, 0x51, 0xf4,
|
||||
0xbf, 0xb8, 0x7e, 0x42, 0xa1, 0x28, 0xd5, 0xbe, 0x59, 0x1c, 0x7d, 0x3d, 0x8f, 0x2f, 0x46, 0x6d,
|
||||
0x98, 0x87, 0xcd, 0xe7, 0x81, 0x09, 0x42, 0x3e, 0x22, 0x3c, 0x98, 0x54, 0xc0, 0x94, 0x99, 0x26,
|
||||
0xa3, 0xe0, 0xff, 0x9b, 0x10, 0xf4, 0xc4, 0x33, 0x78, 0x5e, 0x83, 0x54, 0xc3, 0x6b, 0x36, 0x8f,
|
||||
0x56, 0x7b, 0xe3, 0xd7, 0x3f, 0x7e, 0xbe, 0x3d, 0x73, 0xc3, 0xa3, 0x4d, 0x4d, 0xaf, 0x38, 0x2b,
|
||||
0x60, 0xa3, 0xac, 0xc4, 0x53, 0x48, 0x94, 0xa4, 0x3e, 0x7d, 0x29, 0x38, 0x34, 0xbf, 0x49, 0x5e,
|
||||
0x4b, 0x05, 0x95, 0xa4, 0xfe, 0x81, 0xd9, 0x01, 0x39, 0x46, 0x3e, 0xf9, 0x84, 0x30, 0xbe, 0x9b,
|
||||
0x49, 0xa5, 0x23, 0x49, 0x12, 0xda, 0x32, 0x1e, 0x6a, 0x3b, 0xc8, 0x91, 0x8b, 0x45, 0x96, 0x82,
|
||||
0x4b, 0xf0, 0x6e, 0x69, 0xe2, 0x90, 0xb8, 0x12, 0x93, 0xf7, 0x08, 0x9f, 0xdb, 0x06, 0x13, 0x8e,
|
||||
0x50, 0x5b, 0xe6, 0x4e, 0xe9, 0xb8, 0x9f, 0x6b, 0x9a, 0x6e, 0x85, 0x84, 0x4b, 0xd2, 0xb5, 0x70,
|
||||
0xd4, 0x3f, 0x20, 0xef, 0x10, 0x1e, 0x4c, 0x21, 0x87, 0xa5, 0x4f, 0xbd, 0x27, 0xee, 0x28, 0x2f,
|
||||
0x75, 0x9e, 0xae, 0xe1, 0x82, 0xcd, 0xa6, 0xe1, 0x3a, 0x2c, 0xff, 0x04, 0x58, 0x1f, 0x10, 0x1e,
|
||||
0xcc, 0xa0, 0xb1, 0x2c, 0x89, 0xd5, 0x13, 0xdb, 0xb0, 0x26, 0x1a, 0x6b, 0xc3, 0x5b, 0x75, 0xc6,
|
||||
0x1a, 0x57, 0x3a, 0x4b, 0xd3, 0x86, 0xdf, 0x11, 0x26, 0xe6, 0x02, 0x4c, 0x44, 0x5e, 0x17, 0x7c,
|
||||
0x8b, 0x15, 0x59, 0xbe, 0x4f, 0xd6, 0x96, 0xbb, 0x34, 0x7d, 0x4f, 0x87, 0x7b, 0xdd, 0x6a, 0xed,
|
||||
0x99, 0xbc, 0x1d, 0x5d, 0xc4, 0xa6, 0x77, 0xdb, 0xb9, 0x08, 0x9a, 0x1c, 0xc6, 0xc9, 0xcc, 0x9d,
|
||||
0xfa, 0x86, 0x30, 0x79, 0x54, 0xce, 0x8f, 0x17, 0xe3, 0x44, 0xe4, 0xc8, 0x7f, 0x4f, 0xf3, 0x6f,
|
||||
0x0f, 0x23, 0x57, 0xfe, 0x63, 0xf8, 0xcd, 0xa9, 0x20, 0x9f, 0x7c, 0x41, 0x98, 0x98, 0xce, 0x74,
|
||||
0x3b, 0x8e, 0x3f, 0x3d, 0xb6, 0xee, 0xb9, 0xa3, 0xc1, 0xa7, 0xfe, 0x29, 0x80, 0x93, 0xcf, 0x08,
|
||||
0x5f, 0x88, 0xea, 0xfc, 0x99, 0xa1, 0x98, 0x89, 0x17, 0x92, 0xdc, 0xb4, 0x11, 0x1f, 0xd5, 0xdb,
|
||||
0x68, 0xef, 0x6b, 0xda, 0x1d, 0x6f, 0x4b, 0xd3, 0x9a, 0x57, 0xc3, 0xa9, 0xe3, 0xe3, 0x23, 0xe9,
|
||||
0xc6, 0xc8, 0x8f, 0xde, 0x20, 0xec, 0x25, 0xa2, 0xb0, 0x70, 0x46, 0xc3, 0xbf, 0x3d, 0x32, 0x72,
|
||||
0xb7, 0xc1, 0xdb, 0x45, 0x8f, 0x27, 0xad, 0x3b, 0x15, 0x39, 0xe3, 0x69, 0x20, 0xaa, 0x94, 0xa6,
|
||||
0xc0, 0x35, 0x3c, 0x35, 0x4b, 0xac, 0xcc, 0xe4, 0xbf, 0xde, 0xc1, 0x75, 0x3d, 0x88, 0xcf, 0x6a,
|
||||
0xfd, 0xca, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x65, 0xb4, 0xe0, 0xeb, 0x07, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
@@ -27,8 +75,9 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// Client API for BigtableTableService service

// BigtableTableServiceClient is the client API for BigtableTableService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BigtableTableServiceClient interface {
	// Creates a new table, to be served from a specified cluster.
	// The table can be created with a full set of initial column families,
@@ -39,18 +88,18 @@ type BigtableTableServiceClient interface {
	// Gets the schema of the specified table, including its column families.
	GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error)
	// Permanently deletes a specified table and all of its data.
	DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
	DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	// Changes the name of a specified table.
	// Cannot be used to move tables between clusters, zones, or projects.
	RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
	RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	// Creates a new column family within a specified table.
	CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*ColumnFamily, error)
	// Changes the configuration of a specified column family.
	UpdateColumnFamily(ctx context.Context, in *ColumnFamily, opts ...grpc.CallOption) (*ColumnFamily, error)
	// Permanently deletes a specified column family and all of its data.
	DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
	DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	// Delete all rows in a table corresponding to a particular prefix
	BulkDeleteRows(ctx context.Context, in *BulkDeleteRowsRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
	BulkDeleteRows(ctx context.Context, in *BulkDeleteRowsRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}

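// Editor's note: an illustrative sketch, not part of the generated bindings. It
// shows one way to dial the service and call ListTables; the endpoint, the dial
// options, and the ctx/clusterName variables are assumptions.
//
//	conn, err := grpc.Dial("bigtableadmin.googleapis.com:443", grpc.WithInsecure())
//	if err != nil {
//		// handle dial error
//	}
//	defer conn.Close()
//	client := NewBigtableTableServiceClient(conn)
//	resp, err := client.ListTables(ctx, &ListTablesRequest{Name: clusterName})
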
type bigtableTableServiceClient struct {
|
||||
@@ -63,7 +112,7 @@ func NewBigtableTableServiceClient(cc *grpc.ClientConn) BigtableTableServiceClie
|
||||
|
||||
func (c *bigtableTableServiceClient) CreateTable(ctx context.Context, in *CreateTableRequest, opts ...grpc.CallOption) (*Table, error) {
|
||||
out := new(Table)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateTable", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateTable", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -72,7 +121,7 @@ func (c *bigtableTableServiceClient) CreateTable(ctx context.Context, in *Create
|
||||
|
||||
func (c *bigtableTableServiceClient) ListTables(ctx context.Context, in *ListTablesRequest, opts ...grpc.CallOption) (*ListTablesResponse, error) {
|
||||
out := new(ListTablesResponse)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/ListTables", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/ListTables", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -81,25 +130,25 @@ func (c *bigtableTableServiceClient) ListTables(ctx context.Context, in *ListTab
|
||||
|
||||
func (c *bigtableTableServiceClient) GetTable(ctx context.Context, in *GetTableRequest, opts ...grpc.CallOption) (*Table, error) {
|
||||
out := new(Table)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/GetTable", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/GetTable", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *bigtableTableServiceClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||
out := new(google_protobuf2.Empty)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteTable", in, out, c.cc, opts...)
|
||||
func (c *bigtableTableServiceClient) DeleteTable(ctx context.Context, in *DeleteTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteTable", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *bigtableTableServiceClient) RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||
out := new(google_protobuf2.Empty)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/RenameTable", in, out, c.cc, opts...)
|
||||
func (c *bigtableTableServiceClient) RenameTable(ctx context.Context, in *RenameTableRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/RenameTable", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -108,7 +157,7 @@ func (c *bigtableTableServiceClient) RenameTable(ctx context.Context, in *Rename
|
||||
|
||||
func (c *bigtableTableServiceClient) CreateColumnFamily(ctx context.Context, in *CreateColumnFamilyRequest, opts ...grpc.CallOption) (*ColumnFamily, error) {
|
||||
out := new(ColumnFamily)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateColumnFamily", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/CreateColumnFamily", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -117,33 +166,32 @@ func (c *bigtableTableServiceClient) CreateColumnFamily(ctx context.Context, in
|
||||
|
||||
func (c *bigtableTableServiceClient) UpdateColumnFamily(ctx context.Context, in *ColumnFamily, opts ...grpc.CallOption) (*ColumnFamily, error) {
|
||||
out := new(ColumnFamily)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/UpdateColumnFamily", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/UpdateColumnFamily", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *bigtableTableServiceClient) DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||
out := new(google_protobuf2.Empty)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteColumnFamily", in, out, c.cc, opts...)
|
||||
func (c *bigtableTableServiceClient) DeleteColumnFamily(ctx context.Context, in *DeleteColumnFamilyRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/DeleteColumnFamily", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *bigtableTableServiceClient) BulkDeleteRows(ctx context.Context, in *BulkDeleteRowsRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||
out := new(google_protobuf2.Empty)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/BulkDeleteRows", in, out, c.cc, opts...)
|
||||
func (c *bigtableTableServiceClient) BulkDeleteRows(ctx context.Context, in *BulkDeleteRowsRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.admin.table.v1.BigtableTableService/BulkDeleteRows", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for BigtableTableService service
|
||||
|
||||
// BigtableTableServiceServer is the server API for BigtableTableService service.
|
||||
type BigtableTableServiceServer interface {
|
||||
// Creates a new table, to be served from a specified cluster.
|
||||
// The table can be created with a full set of initial column families,
|
||||
@@ -154,18 +202,18 @@ type BigtableTableServiceServer interface {
|
||||
// Gets the schema of the specified table, including its column families.
|
||||
GetTable(context.Context, *GetTableRequest) (*Table, error)
|
||||
// Permanently deletes a specified table and all of its data.
|
||||
DeleteTable(context.Context, *DeleteTableRequest) (*google_protobuf2.Empty, error)
|
||||
DeleteTable(context.Context, *DeleteTableRequest) (*empty.Empty, error)
|
||||
// Changes the name of a specified table.
|
||||
// Cannot be used to move tables between clusters, zones, or projects.
|
||||
RenameTable(context.Context, *RenameTableRequest) (*google_protobuf2.Empty, error)
|
||||
RenameTable(context.Context, *RenameTableRequest) (*empty.Empty, error)
|
||||
// Creates a new column family within a specified table.
|
||||
CreateColumnFamily(context.Context, *CreateColumnFamilyRequest) (*ColumnFamily, error)
|
||||
// Changes the configuration of a specified column family.
|
||||
UpdateColumnFamily(context.Context, *ColumnFamily) (*ColumnFamily, error)
|
||||
// Permanently deletes a specified column family and all of its data.
|
||||
DeleteColumnFamily(context.Context, *DeleteColumnFamilyRequest) (*google_protobuf2.Empty, error)
|
||||
DeleteColumnFamily(context.Context, *DeleteColumnFamilyRequest) (*empty.Empty, error)
|
||||
// Delete all rows in a table corresponding to a particular prefix
|
||||
BulkDeleteRows(context.Context, *BulkDeleteRowsRequest) (*google_protobuf2.Empty, error)
|
||||
BulkDeleteRows(context.Context, *BulkDeleteRowsRequest) (*empty.Empty, error)
|
||||
}
|
||||
|
||||
func RegisterBigtableTableServiceServer(s *grpc.Server, srv BigtableTableServiceServer) {
|
||||
@@ -378,46 +426,3 @@ var _BigtableTableService_serviceDesc = grpc.ServiceDesc{
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "google/bigtable/admin/table/v1/bigtable_table_service.proto",
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/bigtable/admin/table/v1/bigtable_table_service.proto", fileDescriptor1)
|
||||
}
|
||||
|
||||
var fileDescriptor1 = []byte{
|
||||
// 560 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0xbf, 0x6f, 0xd4, 0x30,
|
||||
0x14, 0xc7, 0x65, 0x06, 0x84, 0x7c, 0x88, 0xc1, 0x42, 0x0c, 0x07, 0x62, 0x88, 0xc4, 0x12, 0xa1,
|
||||
0x58, 0xb9, 0x82, 0x68, 0xaf, 0xaa, 0x84, 0x72, 0xd7, 0x56, 0xa2, 0x20, 0x95, 0x03, 0x16, 0x96,
|
||||
0xca, 0xc9, 0x3d, 0xa2, 0x40, 0x62, 0x87, 0xd8, 0x39, 0x54, 0x50, 0x17, 0x16, 0xfe, 0x00, 0x58,
|
||||
0x61, 0x62, 0x64, 0x82, 0x1d, 0x76, 0x56, 0xfe, 0x05, 0xfe, 0x10, 0x14, 0x3b, 0xa1, 0x69, 0xf9,
|
||||
0xe1, 0x73, 0xd5, 0x25, 0xe7, 0xb3, 0xbf, 0xdf, 0xf7, 0x3e, 0xcf, 0x7e, 0x96, 0xf1, 0x7a, 0x2a,
|
||||
0x44, 0x9a, 0x03, 0x8d, 0xb3, 0x54, 0xb1, 0x38, 0x07, 0xca, 0xe6, 0x45, 0xc6, 0xa9, 0x19, 0x2f,
|
||||
0xc2, 0xdf, 0xf3, 0x7b, 0xe6, 0x2b, 0xa1, 0x5a, 0x64, 0x09, 0x04, 0x65, 0x25, 0x94, 0x20, 0x57,
|
||||
0x8d, 0x39, 0xe8, 0x44, 0x81, 0x36, 0x07, 0x66, 0xbc, 0x08, 0x87, 0x57, 0xda, 0xe0, 0xac, 0xcc,
|
||||
0x28, 0xe3, 0x5c, 0x28, 0xa6, 0x32, 0xc1, 0xa5, 0x71, 0x0f, 0x57, 0xdd, 0x52, 0xcf, 0x99, 0x62,
|
||||
0xad, 0x73, 0x7a, 0x22, 0xe8, 0xbd, 0x02, 0xa4, 0x64, 0x29, 0x74, 0xf9, 0x2f, 0xb7, 0x51, 0xf4,
|
||||
0xbf, 0xb8, 0x7e, 0x42, 0xa1, 0x28, 0xd5, 0xbe, 0x59, 0x1c, 0x7d, 0x3d, 0x8f, 0x2f, 0x46, 0x6d,
|
||||
0x98, 0x87, 0xcd, 0xe7, 0x81, 0x09, 0x42, 0x3e, 0x22, 0x3c, 0x98, 0x54, 0xc0, 0x94, 0x99, 0x26,
|
||||
0xa3, 0xe0, 0xff, 0x9b, 0x10, 0xf4, 0xc4, 0x33, 0x78, 0x5e, 0x83, 0x54, 0xc3, 0x6b, 0x36, 0x8f,
|
||||
0x56, 0x7b, 0xe3, 0xd7, 0x3f, 0x7e, 0xbe, 0x3d, 0x73, 0xc3, 0xa3, 0x4d, 0x4d, 0xaf, 0x38, 0x2b,
|
||||
0x60, 0xa3, 0xac, 0xc4, 0x53, 0x48, 0x94, 0xa4, 0x3e, 0x7d, 0x29, 0x38, 0x34, 0xbf, 0x49, 0x5e,
|
||||
0x4b, 0x05, 0x95, 0xa4, 0xfe, 0x81, 0xd9, 0x01, 0x39, 0x46, 0x3e, 0xf9, 0x84, 0x30, 0xbe, 0x9b,
|
||||
0x49, 0xa5, 0x23, 0x49, 0x12, 0xda, 0x32, 0x1e, 0x6a, 0x3b, 0xc8, 0x91, 0x8b, 0x45, 0x96, 0x82,
|
||||
0x4b, 0xf0, 0x6e, 0x69, 0xe2, 0x90, 0xb8, 0x12, 0x93, 0xf7, 0x08, 0x9f, 0xdb, 0x06, 0x13, 0x8e,
|
||||
0x50, 0x5b, 0xe6, 0x4e, 0xe9, 0xb8, 0x9f, 0x6b, 0x9a, 0x6e, 0x85, 0x84, 0x4b, 0xd2, 0xb5, 0x70,
|
||||
0xd4, 0x3f, 0x20, 0xef, 0x10, 0x1e, 0x4c, 0x21, 0x87, 0xa5, 0x4f, 0xbd, 0x27, 0xee, 0x28, 0x2f,
|
||||
0x75, 0x9e, 0xae, 0xe1, 0x82, 0xcd, 0xa6, 0xe1, 0x3a, 0x2c, 0xff, 0x04, 0x58, 0x1f, 0x10, 0x1e,
|
||||
0xcc, 0xa0, 0xb1, 0x2c, 0x89, 0xd5, 0x13, 0xdb, 0xb0, 0x26, 0x1a, 0x6b, 0xc3, 0x5b, 0x75, 0xc6,
|
||||
0x1a, 0x57, 0x3a, 0x4b, 0xd3, 0x86, 0xdf, 0x11, 0x26, 0xe6, 0x02, 0x4c, 0x44, 0x5e, 0x17, 0x7c,
|
||||
0x8b, 0x15, 0x59, 0xbe, 0x4f, 0xd6, 0x96, 0xbb, 0x34, 0x7d, 0x4f, 0x87, 0x7b, 0xdd, 0x6a, 0xed,
|
||||
0x99, 0xbc, 0x1d, 0x5d, 0xc4, 0xa6, 0x77, 0xdb, 0xb9, 0x08, 0x9a, 0x1c, 0xc6, 0xc9, 0xcc, 0x9d,
|
||||
0xfa, 0x86, 0x30, 0x79, 0x54, 0xce, 0x8f, 0x17, 0xe3, 0x44, 0xe4, 0xc8, 0x7f, 0x4f, 0xf3, 0x6f,
|
||||
0x0f, 0x23, 0x57, 0xfe, 0x63, 0xf8, 0xcd, 0xa9, 0x20, 0x9f, 0x7c, 0x41, 0x98, 0x98, 0xce, 0x74,
|
||||
0x3b, 0x8e, 0x3f, 0x3d, 0xb6, 0xee, 0xb9, 0xa3, 0xc1, 0xa7, 0xfe, 0x29, 0x80, 0x93, 0xcf, 0x08,
|
||||
0x5f, 0x88, 0xea, 0xfc, 0x99, 0xa1, 0x98, 0x89, 0x17, 0x92, 0xdc, 0xb4, 0x11, 0x1f, 0xd5, 0xdb,
|
||||
0x68, 0xef, 0x6b, 0xda, 0x1d, 0x6f, 0x4b, 0xd3, 0x9a, 0x57, 0xc3, 0xa9, 0xe3, 0xe3, 0x23, 0xe9,
|
||||
0xc6, 0xc8, 0x8f, 0xde, 0x20, 0xec, 0x25, 0xa2, 0xb0, 0x70, 0x46, 0xc3, 0xbf, 0x3d, 0x32, 0x72,
|
||||
0xb7, 0xc1, 0xdb, 0x45, 0x8f, 0x27, 0xad, 0x3b, 0x15, 0x39, 0xe3, 0x69, 0x20, 0xaa, 0x94, 0xa6,
|
||||
0xc0, 0x35, 0x3c, 0x35, 0x4b, 0xac, 0xcc, 0xe4, 0xbf, 0xde, 0xc1, 0x75, 0x3d, 0x88, 0xcf, 0x6a,
|
||||
0xfd, 0xca, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x03, 0x65, 0xb4, 0xe0, 0xeb, 0x07, 0x00, 0x00,
|
||||
}
|
||||
|
@@ -3,24 +3,32 @@

package table

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
	fmt "fmt"
	proto "github.com/golang/protobuf/proto"
	math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

type CreateTableRequest struct {
	// The unique name of the cluster in which to create the new table.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The name by which the new table should be referred to within the cluster,
	// e.g. "foobar" rather than "<cluster_name>/tables/foobar".
	TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId" json:"table_id,omitempty"`
	TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
	// The Table to create. The `name` field of the Table and all of its
	// ColumnFamilies must be left blank, and will be populated in the response.
	Table *Table `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"`
	Table *Table `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
	// The optional list of row keys that will be used to initially split the
	// table into several tablets (Tablets are similar to HBase regions).
	// Given two split keys, "s1" and "s2", three tablets will be created,
@@ -36,13 +44,36 @@ type CreateTableRequest struct {
	//   - Tablet 3 [customer_1, customer_2) => {"customer_1"}.
	//   - Tablet 4 [customer_2, other)      => {"customer_2"}.
	//   - Tablet 5 [other, )                => {"other", "zz"}.
	InitialSplitKeys []string `protobuf:"bytes,4,rep,name=initial_split_keys,json=initialSplitKeys" json:"initial_split_keys,omitempty"`
	InitialSplitKeys []string `protobuf:"bytes,4,rep,name=initial_split_keys,json=initialSplitKeys,proto3" json:"initial_split_keys,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

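// Editor's note: an illustrative sketch, not generated output. It builds a
// CreateTableRequest using the split-key behaviour documented above; the cluster
// name, table id, and key values are assumptions.
//
//	req := &CreateTableRequest{
//		Name:             "projects/<project>/zones/<zone>/clusters/<cluster>",
//		TableId:          "events",
//		Table:            &Table{},
//		InitialSplitKeys: []string{"customer_1", "customer_2", "other"},
//	}
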
func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} }
|
||||
func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*CreateTableRequest) ProtoMessage() {}
|
||||
func (*CreateTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
|
||||
func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} }
|
||||
func (m *CreateTableRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*CreateTableRequest) ProtoMessage() {}
|
||||
func (*CreateTableRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_a159d72e7e8b0be6, []int{0}
|
||||
}
|
||||
|
||||
func (m *CreateTableRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_CreateTableRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *CreateTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_CreateTableRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *CreateTableRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_CreateTableRequest.Merge(m, src)
|
||||
}
|
||||
func (m *CreateTableRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_CreateTableRequest.Size(m)
|
||||
}
|
||||
func (m *CreateTableRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_CreateTableRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_CreateTableRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *CreateTableRequest) GetName() string {
|
||||
if m != nil {
|
||||
@@ -74,13 +105,36 @@ func (m *CreateTableRequest) GetInitialSplitKeys() []string {
|
||||
|
||||
type ListTablesRequest struct {
|
||||
// The unique name of the cluster for which tables should be listed.
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListTablesRequest) Reset() { *m = ListTablesRequest{} }
|
||||
func (m *ListTablesRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListTablesRequest) ProtoMessage() {}
|
||||
func (*ListTablesRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
|
||||
func (m *ListTablesRequest) Reset() { *m = ListTablesRequest{} }
|
||||
func (m *ListTablesRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListTablesRequest) ProtoMessage() {}
|
||||
func (*ListTablesRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_a159d72e7e8b0be6, []int{1}
|
||||
}
|
||||
|
||||
func (m *ListTablesRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListTablesRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListTablesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListTablesRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ListTablesRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListTablesRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ListTablesRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ListTablesRequest.Size(m)
|
||||
}
|
||||
func (m *ListTablesRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListTablesRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListTablesRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ListTablesRequest) GetName() string {
|
||||
if m != nil {
|
||||
@@ -92,13 +146,36 @@ func (m *ListTablesRequest) GetName() string {
|
||||
type ListTablesResponse struct {
|
||||
// The tables present in the requested cluster.
|
||||
// At present, only the names of the tables are populated.
|
||||
Tables []*Table `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"`
|
||||
Tables []*Table `protobuf:"bytes,1,rep,name=tables,proto3" json:"tables,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListTablesResponse) Reset() { *m = ListTablesResponse{} }
|
||||
func (m *ListTablesResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListTablesResponse) ProtoMessage() {}
|
||||
func (*ListTablesResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} }
|
||||
func (m *ListTablesResponse) Reset() { *m = ListTablesResponse{} }
|
||||
func (m *ListTablesResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListTablesResponse) ProtoMessage() {}
|
||||
func (*ListTablesResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_a159d72e7e8b0be6, []int{2}
|
||||
}
|
||||
|
||||
func (m *ListTablesResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListTablesResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListTablesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListTablesResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ListTablesResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListTablesResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ListTablesResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ListTablesResponse.Size(m)
|
||||
}
|
||||
func (m *ListTablesResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListTablesResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListTablesResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ListTablesResponse) GetTables() []*Table {
|
||||
if m != nil {
|
||||
@@ -109,13 +186,36 @@ func (m *ListTablesResponse) GetTables() []*Table {
|
||||
|
||||
type GetTableRequest struct {
|
||||
// The unique name of the requested table.
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetTableRequest) Reset() { *m = GetTableRequest{} }
|
||||
func (m *GetTableRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetTableRequest) ProtoMessage() {}
|
||||
func (*GetTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} }
|
||||
func (m *GetTableRequest) Reset() { *m = GetTableRequest{} }
|
||||
func (m *GetTableRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetTableRequest) ProtoMessage() {}
|
||||
func (*GetTableRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_a159d72e7e8b0be6, []int{3}
|
||||
}
|
||||
|
||||
func (m *GetTableRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GetTableRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GetTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GetTableRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *GetTableRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GetTableRequest.Merge(m, src)
|
}
func (m *GetTableRequest) XXX_Size() int {
	return xxx_messageInfo_GetTableRequest.Size(m)
}
func (m *GetTableRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_GetTableRequest.DiscardUnknown(m)
}

var xxx_messageInfo_GetTableRequest proto.InternalMessageInfo

func (m *GetTableRequest) GetName() string {
	if m != nil {
@@ -126,13 +226,36 @@ func (m *GetTableRequest) GetName() string {

type DeleteTableRequest struct {
	// The unique name of the table to be deleted.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} }
func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteTableRequest) ProtoMessage() {}
func (*DeleteTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} }
func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} }
func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteTableRequest) ProtoMessage() {}
func (*DeleteTableRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a159d72e7e8b0be6, []int{4}
}

func (m *DeleteTableRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DeleteTableRequest.Unmarshal(m, b)
}
func (m *DeleteTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DeleteTableRequest.Marshal(b, m, deterministic)
}
func (m *DeleteTableRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteTableRequest.Merge(m, src)
}
func (m *DeleteTableRequest) XXX_Size() int {
	return xxx_messageInfo_DeleteTableRequest.Size(m)
}
func (m *DeleteTableRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteTableRequest.DiscardUnknown(m)
}

var xxx_messageInfo_DeleteTableRequest proto.InternalMessageInfo

func (m *DeleteTableRequest) GetName() string {
	if m != nil {
@@ -143,16 +266,39 @@ func (m *DeleteTableRequest) GetName() string {

type RenameTableRequest struct {
	// The current unique name of the table.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The new name by which the table should be referred to within its containing
	// cluster, e.g. "foobar" rather than "<cluster_name>/tables/foobar".
	NewId string `protobuf:"bytes,2,opt,name=new_id,json=newId" json:"new_id,omitempty"`
	NewId string `protobuf:"bytes,2,opt,name=new_id,json=newId,proto3" json:"new_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *RenameTableRequest) Reset() { *m = RenameTableRequest{} }
func (m *RenameTableRequest) String() string { return proto.CompactTextString(m) }
func (*RenameTableRequest) ProtoMessage() {}
func (*RenameTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} }
func (m *RenameTableRequest) Reset() { *m = RenameTableRequest{} }
func (m *RenameTableRequest) String() string { return proto.CompactTextString(m) }
func (*RenameTableRequest) ProtoMessage() {}
func (*RenameTableRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a159d72e7e8b0be6, []int{5}
}

func (m *RenameTableRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_RenameTableRequest.Unmarshal(m, b)
}
func (m *RenameTableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_RenameTableRequest.Marshal(b, m, deterministic)
}
func (m *RenameTableRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_RenameTableRequest.Merge(m, src)
}
func (m *RenameTableRequest) XXX_Size() int {
	return xxx_messageInfo_RenameTableRequest.Size(m)
}
func (m *RenameTableRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_RenameTableRequest.DiscardUnknown(m)
}

var xxx_messageInfo_RenameTableRequest proto.InternalMessageInfo

func (m *RenameTableRequest) GetName() string {
	if m != nil {
@@ -170,18 +316,41 @@ func (m *RenameTableRequest) GetNewId() string {

type CreateColumnFamilyRequest struct {
	// The unique name of the table in which to create the new column family.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// The name by which the new column family should be referred to within the
	// table, e.g. "foobar" rather than "<table_name>/columnFamilies/foobar".
	ColumnFamilyId string `protobuf:"bytes,2,opt,name=column_family_id,json=columnFamilyId" json:"column_family_id,omitempty"`
	ColumnFamilyId string `protobuf:"bytes,2,opt,name=column_family_id,json=columnFamilyId,proto3" json:"column_family_id,omitempty"`
	// The column family to create. The `name` field must be left blank.
	ColumnFamily *ColumnFamily `protobuf:"bytes,3,opt,name=column_family,json=columnFamily" json:"column_family,omitempty"`
	ColumnFamily *ColumnFamily `protobuf:"bytes,3,opt,name=column_family,json=columnFamily,proto3" json:"column_family,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CreateColumnFamilyRequest) Reset() { *m = CreateColumnFamilyRequest{} }
func (m *CreateColumnFamilyRequest) String() string { return proto.CompactTextString(m) }
func (*CreateColumnFamilyRequest) ProtoMessage() {}
func (*CreateColumnFamilyRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} }
func (m *CreateColumnFamilyRequest) Reset() { *m = CreateColumnFamilyRequest{} }
func (m *CreateColumnFamilyRequest) String() string { return proto.CompactTextString(m) }
func (*CreateColumnFamilyRequest) ProtoMessage() {}
func (*CreateColumnFamilyRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a159d72e7e8b0be6, []int{6}
}

func (m *CreateColumnFamilyRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateColumnFamilyRequest.Unmarshal(m, b)
}
func (m *CreateColumnFamilyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateColumnFamilyRequest.Marshal(b, m, deterministic)
}
func (m *CreateColumnFamilyRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateColumnFamilyRequest.Merge(m, src)
}
func (m *CreateColumnFamilyRequest) XXX_Size() int {
	return xxx_messageInfo_CreateColumnFamilyRequest.Size(m)
}
func (m *CreateColumnFamilyRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateColumnFamilyRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CreateColumnFamilyRequest proto.InternalMessageInfo

func (m *CreateColumnFamilyRequest) GetName() string {
	if m != nil {
@@ -206,13 +375,36 @@ func (m *CreateColumnFamilyRequest) GetColumnFamily() *ColumnFamily {

type DeleteColumnFamilyRequest struct {
	// The unique name of the column family to be deleted.
	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *DeleteColumnFamilyRequest) Reset() { *m = DeleteColumnFamilyRequest{} }
func (m *DeleteColumnFamilyRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteColumnFamilyRequest) ProtoMessage() {}
func (*DeleteColumnFamilyRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} }
func (m *DeleteColumnFamilyRequest) Reset() { *m = DeleteColumnFamilyRequest{} }
func (m *DeleteColumnFamilyRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteColumnFamilyRequest) ProtoMessage() {}
func (*DeleteColumnFamilyRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a159d72e7e8b0be6, []int{7}
}

func (m *DeleteColumnFamilyRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DeleteColumnFamilyRequest.Unmarshal(m, b)
}
func (m *DeleteColumnFamilyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DeleteColumnFamilyRequest.Marshal(b, m, deterministic)
}
func (m *DeleteColumnFamilyRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteColumnFamilyRequest.Merge(m, src)
}
func (m *DeleteColumnFamilyRequest) XXX_Size() int {
	return xxx_messageInfo_DeleteColumnFamilyRequest.Size(m)
}
func (m *DeleteColumnFamilyRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteColumnFamilyRequest.DiscardUnknown(m)
}

var xxx_messageInfo_DeleteColumnFamilyRequest proto.InternalMessageInfo

func (m *DeleteColumnFamilyRequest) GetName() string {
	if m != nil {
@@ -223,17 +415,47 @@ func (m *DeleteColumnFamilyRequest) GetName() string {

type BulkDeleteRowsRequest struct {
	// The unique name of the table on which to perform the bulk delete
	TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"`
	TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
	// Types that are valid to be assigned to Target:
	// *BulkDeleteRowsRequest_RowKeyPrefix
	// *BulkDeleteRowsRequest_DeleteAllDataFromTable
	Target isBulkDeleteRowsRequest_Target `protobuf_oneof:"target"`
	Target isBulkDeleteRowsRequest_Target `protobuf_oneof:"target"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *BulkDeleteRowsRequest) Reset() { *m = BulkDeleteRowsRequest{} }
func (m *BulkDeleteRowsRequest) String() string { return proto.CompactTextString(m) }
func (*BulkDeleteRowsRequest) ProtoMessage() {}
func (*BulkDeleteRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} }
func (m *BulkDeleteRowsRequest) Reset() { *m = BulkDeleteRowsRequest{} }
func (m *BulkDeleteRowsRequest) String() string { return proto.CompactTextString(m) }
func (*BulkDeleteRowsRequest) ProtoMessage() {}
func (*BulkDeleteRowsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a159d72e7e8b0be6, []int{8}
}

func (m *BulkDeleteRowsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BulkDeleteRowsRequest.Unmarshal(m, b)
}
func (m *BulkDeleteRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BulkDeleteRowsRequest.Marshal(b, m, deterministic)
}
func (m *BulkDeleteRowsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BulkDeleteRowsRequest.Merge(m, src)
}
func (m *BulkDeleteRowsRequest) XXX_Size() int {
	return xxx_messageInfo_BulkDeleteRowsRequest.Size(m)
}
func (m *BulkDeleteRowsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BulkDeleteRowsRequest.DiscardUnknown(m)
}

var xxx_messageInfo_BulkDeleteRowsRequest proto.InternalMessageInfo

func (m *BulkDeleteRowsRequest) GetTableName() string {
	if m != nil {
		return m.TableName
	}
	return ""
}

type isBulkDeleteRowsRequest_Target interface {
	isBulkDeleteRowsRequest_Target()
@@ -242,11 +464,13 @@ type isBulkDeleteRowsRequest_Target interface {
type BulkDeleteRowsRequest_RowKeyPrefix struct {
	RowKeyPrefix []byte `protobuf:"bytes,2,opt,name=row_key_prefix,json=rowKeyPrefix,proto3,oneof"`
}

type BulkDeleteRowsRequest_DeleteAllDataFromTable struct {
	DeleteAllDataFromTable bool `protobuf:"varint,3,opt,name=delete_all_data_from_table,json=deleteAllDataFromTable,oneof"`
	DeleteAllDataFromTable bool `protobuf:"varint,3,opt,name=delete_all_data_from_table,json=deleteAllDataFromTable,proto3,oneof"`
}

func (*BulkDeleteRowsRequest_RowKeyPrefix) isBulkDeleteRowsRequest_Target() {}
func (*BulkDeleteRowsRequest_RowKeyPrefix) isBulkDeleteRowsRequest_Target() {}

func (*BulkDeleteRowsRequest_DeleteAllDataFromTable) isBulkDeleteRowsRequest_Target() {}

func (m *BulkDeleteRowsRequest) GetTarget() isBulkDeleteRowsRequest_Target {
@@ -256,13 +480,6 @@ func (m *BulkDeleteRowsRequest) GetTarget() isBulkDeleteRowsRequest_Target {
	return nil
}

func (m *BulkDeleteRowsRequest) GetTableName() string {
	if m != nil {
		return m.TableName
	}
	return ""
}

func (m *BulkDeleteRowsRequest) GetRowKeyPrefix() []byte {
	if x, ok := m.GetTarget().(*BulkDeleteRowsRequest_RowKeyPrefix); ok {
		return x.RowKeyPrefix
@@ -333,11 +550,11 @@ func _BulkDeleteRowsRequest_OneofSizer(msg proto.Message) (n int) {
	// target
	switch x := m.Target.(type) {
	case *BulkDeleteRowsRequest_RowKeyPrefix:
		n += proto.SizeVarint(2<<3 | proto.WireBytes)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(len(x.RowKeyPrefix)))
		n += len(x.RowKeyPrefix)
	case *BulkDeleteRowsRequest_DeleteAllDataFromTable:
		n += proto.SizeVarint(3<<3 | proto.WireVarint)
		n += 1 // tag and wire
		n += 1
	case nil:
	default:
@@ -359,10 +576,10 @@ func init() {
}

func init() {
	proto.RegisterFile("google/bigtable/admin/table/v1/bigtable_table_service_messages.proto", fileDescriptor2)
	proto.RegisterFile("google/bigtable/admin/table/v1/bigtable_table_service_messages.proto", fileDescriptor_a159d72e7e8b0be6)
}

var fileDescriptor2 = []byte{
var fileDescriptor_a159d72e7e8b0be6 = []byte{
	// 514 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xc1, 0x6e, 0xd3, 0x40,
	0x10, 0xad, 0x49, 0x1b, 0x92, 0x21, 0x94, 0xb2, 0x52, 0x51, 0x52, 0x09, 0x14, 0x56, 0x2a, 0xe4,

1404
vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go
generated
vendored
File diff suppressed because it is too large
1325
vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go
generated
vendored
File diff suppressed because it is too large
34
vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/common.pb.go
generated
vendored
@@ -3,17 +3,25 @@

package admin

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import _ "github.com/golang/protobuf/ptypes/timestamp"
import (
	fmt "fmt"
	proto "github.com/golang/protobuf/proto"
	_ "github.com/golang/protobuf/ptypes/timestamp"
	_ "google.golang.org/genproto/googleapis/api/annotations"
	math "math"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

// Storage media types for persisting Bigtable data.
type StorageType int32

@@ -31,24 +39,30 @@ var StorageType_name = map[int32]string{
	1: "SSD",
	2: "HDD",
}

var StorageType_value = map[string]int32{
	"STORAGE_TYPE_UNSPECIFIED": 0,
	"SSD": 1,
	"HDD": 2,
	"SSD": 1,
	"HDD": 2,
}

func (x StorageType) String() string {
	return proto.EnumName(StorageType_name, int32(x))
}
func (StorageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }

func (StorageType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_79ab584e3c858108, []int{0}
}

func init() {
	proto.RegisterEnum("google.bigtable.admin.v2.StorageType", StorageType_name, StorageType_value)
}

func init() { proto.RegisterFile("google/bigtable/admin/v2/common.proto", fileDescriptor2) }
func init() {
	proto.RegisterFile("google/bigtable/admin/v2/common.proto", fileDescriptor_79ab584e3c858108)
}

var fileDescriptor2 = []byte{
var fileDescriptor_79ab584e3c858108 = []byte{
	// 270 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd0, 0xcf, 0x4b, 0xc3, 0x30,
	0x14, 0x07, 0x70, 0x3b, 0x41, 0x21, 0xbb, 0x94, 0x9e, 0xc6, 0x28, 0x7a, 0xf2, 0xe2, 0x21, 0x81,

261
vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/instance.pb.go
generated
vendored
@@ -3,16 +3,24 @@
|
||||
|
||||
package admin
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Possible states of an instance.
|
||||
type Instance_State int32
|
||||
|
||||
@@ -32,6 +40,7 @@ var Instance_State_name = map[int32]string{
|
||||
1: "READY",
|
||||
2: "CREATING",
|
||||
}
|
||||
|
||||
var Instance_State_value = map[string]int32{
|
||||
"STATE_NOT_KNOWN": 0,
|
||||
"READY": 1,
|
||||
@@ -41,7 +50,10 @@ var Instance_State_value = map[string]int32{
|
||||
func (x Instance_State) String() string {
|
||||
return proto.EnumName(Instance_State_name, int32(x))
|
||||
}
|
||||
func (Instance_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} }
|
||||
|
||||
func (Instance_State) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_712127d2a900984d, []int{0, 0}
|
||||
}
|
||||
|
||||
// The type of the instance.
|
||||
type Instance_Type int32
|
||||
@@ -69,6 +81,7 @@ var Instance_Type_name = map[int32]string{
|
||||
1: "PRODUCTION",
|
||||
2: "DEVELOPMENT",
|
||||
}
|
||||
|
||||
var Instance_Type_value = map[string]int32{
|
||||
"TYPE_UNSPECIFIED": 0,
|
||||
"PRODUCTION": 1,
|
||||
@@ -78,7 +91,10 @@ var Instance_Type_value = map[string]int32{
|
||||
func (x Instance_Type) String() string {
|
||||
return proto.EnumName(Instance_Type_name, int32(x))
|
||||
}
|
||||
func (Instance_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 1} }
|
||||
|
||||
func (Instance_Type) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_712127d2a900984d, []int{0, 1}
|
||||
}
|
||||
|
||||
// Possible states of a cluster.
|
||||
type Cluster_State int32
|
||||
@@ -110,6 +126,7 @@ var Cluster_State_name = map[int32]string{
|
||||
3: "RESIZING",
|
||||
4: "DISABLED",
|
||||
}
|
||||
|
||||
var Cluster_State_value = map[string]int32{
|
||||
"STATE_NOT_KNOWN": 0,
|
||||
"READY": 1,
|
||||
@@ -121,7 +138,10 @@ var Cluster_State_value = map[string]int32{
|
||||
func (x Cluster_State) String() string {
|
||||
return proto.EnumName(Cluster_State_name, int32(x))
|
||||
}
|
||||
func (Cluster_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{1, 0} }
|
||||
|
||||
func (Cluster_State) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_712127d2a900984d, []int{1, 0}
|
||||
}
|
||||
|
||||
// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
|
||||
// the resources that serve them.
|
||||
@@ -131,16 +151,16 @@ type Instance struct {
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the instance. Values are of the form
|
||||
// `projects/<project>/instances/[a-z][a-z0-9\\-]+[a-z0-9]`.
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// The descriptive name for this instance as it appears in UIs.
|
||||
// Can be changed at any time, but should be kept globally unique
|
||||
// to avoid confusion.
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"`
|
||||
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
|
||||
// (`OutputOnly`)
|
||||
// The current state of the instance.
|
||||
State Instance_State `protobuf:"varint,3,opt,name=state,enum=google.bigtable.admin.v2.Instance_State" json:"state,omitempty"`
|
||||
State Instance_State `protobuf:"varint,3,opt,name=state,proto3,enum=google.bigtable.admin.v2.Instance_State" json:"state,omitempty"`
|
||||
// The type of the instance. Defaults to `PRODUCTION`.
|
||||
Type Instance_Type `protobuf:"varint,4,opt,name=type,enum=google.bigtable.admin.v2.Instance_Type" json:"type,omitempty"`
|
||||
Type Instance_Type `protobuf:"varint,4,opt,name=type,proto3,enum=google.bigtable.admin.v2.Instance_Type" json:"type,omitempty"`
|
||||
// Labels are a flexible and lightweight mechanism for organizing cloud
|
||||
// resources into groups that reflect a customer's organizational needs and
|
||||
// deployment strategies. They can be used to filter resources and aggregate
|
||||
@@ -152,13 +172,36 @@ type Instance struct {
|
||||
// the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`.
|
||||
// * No more than 64 labels can be associated with a given resource.
|
||||
// * Keys and values must both be under 128 bytes.
|
||||
Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Instance) Reset() { *m = Instance{} }
|
||||
func (m *Instance) String() string { return proto.CompactTextString(m) }
|
||||
func (*Instance) ProtoMessage() {}
|
||||
func (*Instance) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} }
|
||||
func (m *Instance) Reset() { *m = Instance{} }
|
||||
func (m *Instance) String() string { return proto.CompactTextString(m) }
|
||||
func (*Instance) ProtoMessage() {}
|
||||
func (*Instance) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_712127d2a900984d, []int{0}
|
||||
}
|
||||
|
||||
func (m *Instance) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Instance.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Instance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Instance.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Instance) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Instance.Merge(m, src)
|
||||
}
|
||||
func (m *Instance) XXX_Size() int {
|
||||
return xxx_messageInfo_Instance.Size(m)
|
||||
}
|
||||
func (m *Instance) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Instance.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Instance proto.InternalMessageInfo
|
||||
|
||||
func (m *Instance) GetName() string {
|
||||
if m != nil {
|
||||
@@ -202,29 +245,52 @@ type Cluster struct {
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the cluster. Values are of the form
|
||||
// `projects/<project>/instances/<instance>/clusters/[a-z][-a-z0-9]*`.
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// (`CreationOnly`)
|
||||
// The location where this cluster's nodes and storage reside. For best
|
||||
// performance, clients should be located as close as possible to this
|
||||
// cluster. Currently only zones are supported, so values should be of the
|
||||
// form `projects/<project>/locations/<zone>`.
|
||||
Location string `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"`
|
||||
Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
|
||||
// (`OutputOnly`)
|
||||
// The current state of the cluster.
|
||||
State Cluster_State `protobuf:"varint,3,opt,name=state,enum=google.bigtable.admin.v2.Cluster_State" json:"state,omitempty"`
|
||||
State Cluster_State `protobuf:"varint,3,opt,name=state,proto3,enum=google.bigtable.admin.v2.Cluster_State" json:"state,omitempty"`
|
||||
// The number of nodes allocated to this cluster. More nodes enable higher
|
||||
// throughput and more consistent performance.
|
||||
ServeNodes int32 `protobuf:"varint,4,opt,name=serve_nodes,json=serveNodes" json:"serve_nodes,omitempty"`
|
||||
ServeNodes int32 `protobuf:"varint,4,opt,name=serve_nodes,json=serveNodes,proto3" json:"serve_nodes,omitempty"`
|
||||
// (`CreationOnly`)
|
||||
// The type of storage used by this cluster to serve its
|
||||
// parent instance's tables, unless explicitly overridden.
|
||||
DefaultStorageType StorageType `protobuf:"varint,5,opt,name=default_storage_type,json=defaultStorageType,enum=google.bigtable.admin.v2.StorageType" json:"default_storage_type,omitempty"`
|
||||
DefaultStorageType StorageType `protobuf:"varint,5,opt,name=default_storage_type,json=defaultStorageType,proto3,enum=google.bigtable.admin.v2.StorageType" json:"default_storage_type,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Cluster) Reset() { *m = Cluster{} }
|
||||
func (m *Cluster) String() string { return proto.CompactTextString(m) }
|
||||
func (*Cluster) ProtoMessage() {}
|
||||
func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} }
|
||||
func (m *Cluster) Reset() { *m = Cluster{} }
|
||||
func (m *Cluster) String() string { return proto.CompactTextString(m) }
|
||||
func (*Cluster) ProtoMessage() {}
|
||||
func (*Cluster) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_712127d2a900984d, []int{1}
|
||||
}
|
||||
|
||||
func (m *Cluster) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Cluster.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Cluster.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Cluster) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Cluster.Merge(m, src)
|
||||
}
|
||||
func (m *Cluster) XXX_Size() int {
|
||||
return xxx_messageInfo_Cluster.Size(m)
|
||||
}
|
||||
func (m *Cluster) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Cluster.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Cluster proto.InternalMessageInfo
|
||||
|
||||
func (m *Cluster) GetName() string {
|
||||
if m != nil {
|
||||
@@ -261,18 +327,13 @@ func (m *Cluster) GetDefaultStorageType() StorageType {
|
||||
return StorageType_STORAGE_TYPE_UNSPECIFIED
|
||||
}
|
||||
|
||||
// This is a private alpha release of Cloud Bigtable replication. This feature
|
||||
// is not currently available to most Cloud Bigtable customers. This feature
|
||||
// might be changed in backward-incompatible ways and is not recommended for
|
||||
// production use. It is not subject to any SLA or deprecation policy.
|
||||
//
|
||||
// A configuration object describing how Cloud Bigtable should treat traffic
|
||||
// from a particular end user application.
|
||||
type AppProfile struct {
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the app profile. Values are of the form
|
||||
// `projects/<project>/instances/<instance>/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`.
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// Strongly validated etag for optimistic concurrency control. Preserve the
|
||||
// value returned from `GetAppProfile` when calling `UpdateAppProfile` to
|
||||
// fail the request if there has been a modification in the mean time. The
|
||||
@@ -281,43 +342,45 @@ type AppProfile struct {
|
||||
// See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and
|
||||
// [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more
|
||||
// details.
|
||||
Etag string `protobuf:"bytes,2,opt,name=etag" json:"etag,omitempty"`
|
||||
Etag string `protobuf:"bytes,2,opt,name=etag,proto3" json:"etag,omitempty"`
|
||||
// Optional long form description of the use case for this AppProfile.
|
||||
Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
|
||||
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
|
||||
// The routing policy for all read/write requests which use this app profile.
|
||||
// A value must be explicitly set.
|
||||
//
|
||||
// Types that are valid to be assigned to RoutingPolicy:
|
||||
// *AppProfile_MultiClusterRoutingUseAny_
|
||||
// *AppProfile_SingleClusterRouting_
|
||||
RoutingPolicy isAppProfile_RoutingPolicy `protobuf_oneof:"routing_policy"`
|
||||
RoutingPolicy isAppProfile_RoutingPolicy `protobuf_oneof:"routing_policy"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AppProfile) Reset() { *m = AppProfile{} }
|
||||
func (m *AppProfile) String() string { return proto.CompactTextString(m) }
|
||||
func (*AppProfile) ProtoMessage() {}
|
||||
func (*AppProfile) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} }
|
||||
|
||||
type isAppProfile_RoutingPolicy interface {
|
||||
isAppProfile_RoutingPolicy()
|
||||
func (m *AppProfile) Reset() { *m = AppProfile{} }
|
||||
func (m *AppProfile) String() string { return proto.CompactTextString(m) }
|
||||
func (*AppProfile) ProtoMessage() {}
|
||||
func (*AppProfile) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_712127d2a900984d, []int{2}
|
||||
}
|
||||
|
||||
type AppProfile_MultiClusterRoutingUseAny_ struct {
|
||||
MultiClusterRoutingUseAny *AppProfile_MultiClusterRoutingUseAny `protobuf:"bytes,5,opt,name=multi_cluster_routing_use_any,json=multiClusterRoutingUseAny,oneof"`
|
||||
func (m *AppProfile) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_AppProfile.Unmarshal(m, b)
|
||||
}
|
||||
type AppProfile_SingleClusterRouting_ struct {
|
||||
SingleClusterRouting *AppProfile_SingleClusterRouting `protobuf:"bytes,6,opt,name=single_cluster_routing,json=singleClusterRouting,oneof"`
|
||||
func (m *AppProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_AppProfile.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *AppProfile) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_AppProfile.Merge(m, src)
|
||||
}
|
||||
func (m *AppProfile) XXX_Size() int {
|
||||
return xxx_messageInfo_AppProfile.Size(m)
|
||||
}
|
||||
func (m *AppProfile) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_AppProfile.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
func (*AppProfile_MultiClusterRoutingUseAny_) isAppProfile_RoutingPolicy() {}
|
||||
func (*AppProfile_SingleClusterRouting_) isAppProfile_RoutingPolicy() {}
|
||||
|
||||
func (m *AppProfile) GetRoutingPolicy() isAppProfile_RoutingPolicy {
|
||||
if m != nil {
|
||||
return m.RoutingPolicy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
var xxx_messageInfo_AppProfile proto.InternalMessageInfo
|
||||
|
||||
func (m *AppProfile) GetName() string {
|
||||
if m != nil {
|
||||
@@ -340,6 +403,29 @@ func (m *AppProfile) GetDescription() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
type isAppProfile_RoutingPolicy interface {
|
||||
isAppProfile_RoutingPolicy()
|
||||
}
|
||||
|
||||
type AppProfile_MultiClusterRoutingUseAny_ struct {
|
||||
MultiClusterRoutingUseAny *AppProfile_MultiClusterRoutingUseAny `protobuf:"bytes,5,opt,name=multi_cluster_routing_use_any,json=multiClusterRoutingUseAny,proto3,oneof"`
|
||||
}
|
||||
|
||||
type AppProfile_SingleClusterRouting_ struct {
|
||||
SingleClusterRouting *AppProfile_SingleClusterRouting `protobuf:"bytes,6,opt,name=single_cluster_routing,json=singleClusterRouting,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*AppProfile_MultiClusterRoutingUseAny_) isAppProfile_RoutingPolicy() {}
|
||||
|
||||
func (*AppProfile_SingleClusterRouting_) isAppProfile_RoutingPolicy() {}
|
||||
|
||||
func (m *AppProfile) GetRoutingPolicy() isAppProfile_RoutingPolicy {
|
||||
if m != nil {
|
||||
return m.RoutingPolicy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *AppProfile) GetMultiClusterRoutingUseAny() *AppProfile_MultiClusterRoutingUseAny {
|
||||
if x, ok := m.GetRoutingPolicy().(*AppProfile_MultiClusterRoutingUseAny_); ok {
|
||||
return x.MultiClusterRoutingUseAny
|
||||
@@ -413,12 +499,12 @@ func _AppProfile_OneofSizer(msg proto.Message) (n int) {
|
||||
switch x := m.RoutingPolicy.(type) {
|
||||
case *AppProfile_MultiClusterRoutingUseAny_:
|
||||
s := proto.Size(x.MultiClusterRoutingUseAny)
|
||||
n += proto.SizeVarint(5<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *AppProfile_SingleClusterRouting_:
|
||||
s := proto.Size(x.SingleClusterRouting)
|
||||
n += proto.SizeVarint(6<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
@@ -433,34 +519,76 @@ func _AppProfile_OneofSizer(msg proto.Message) (n int) {
|
||||
// Choosing this option sacrifices read-your-writes consistency to improve
|
||||
// availability.
|
||||
type AppProfile_MultiClusterRoutingUseAny struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AppProfile_MultiClusterRoutingUseAny) Reset() { *m = AppProfile_MultiClusterRoutingUseAny{} }
|
||||
func (m *AppProfile_MultiClusterRoutingUseAny) String() string { return proto.CompactTextString(m) }
|
||||
func (*AppProfile_MultiClusterRoutingUseAny) ProtoMessage() {}
|
||||
func (*AppProfile_MultiClusterRoutingUseAny) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor3, []int{2, 0}
|
||||
return fileDescriptor_712127d2a900984d, []int{2, 0}
|
||||
}
|
||||
|
||||
func (m *AppProfile_MultiClusterRoutingUseAny) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_AppProfile_MultiClusterRoutingUseAny.Unmarshal(m, b)
|
||||
}
|
||||
func (m *AppProfile_MultiClusterRoutingUseAny) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_AppProfile_MultiClusterRoutingUseAny.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *AppProfile_MultiClusterRoutingUseAny) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_AppProfile_MultiClusterRoutingUseAny.Merge(m, src)
|
||||
}
|
||||
func (m *AppProfile_MultiClusterRoutingUseAny) XXX_Size() int {
|
||||
return xxx_messageInfo_AppProfile_MultiClusterRoutingUseAny.Size(m)
|
||||
}
|
||||
func (m *AppProfile_MultiClusterRoutingUseAny) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_AppProfile_MultiClusterRoutingUseAny.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_AppProfile_MultiClusterRoutingUseAny proto.InternalMessageInfo
|
||||
|
||||
// Unconditionally routes all read/write requests to a specific cluster.
|
||||
// This option preserves read-your-writes consistency, but does not improve
|
||||
// availability.
|
||||
type AppProfile_SingleClusterRouting struct {
|
||||
// The cluster to which read/write requests should be routed.
|
||||
ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId" json:"cluster_id,omitempty"`
|
||||
ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
|
||||
// Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are
|
||||
// allowed by this app profile. It is unsafe to send these requests to
|
||||
// the same table/row/column in multiple clusters.
|
||||
AllowTransactionalWrites bool `protobuf:"varint,2,opt,name=allow_transactional_writes,json=allowTransactionalWrites" json:"allow_transactional_writes,omitempty"`
|
||||
AllowTransactionalWrites bool `protobuf:"varint,2,opt,name=allow_transactional_writes,json=allowTransactionalWrites,proto3" json:"allow_transactional_writes,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AppProfile_SingleClusterRouting) Reset() { *m = AppProfile_SingleClusterRouting{} }
|
||||
func (m *AppProfile_SingleClusterRouting) String() string { return proto.CompactTextString(m) }
|
||||
func (*AppProfile_SingleClusterRouting) ProtoMessage() {}
|
||||
func (*AppProfile_SingleClusterRouting) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor3, []int{2, 1}
|
||||
return fileDescriptor_712127d2a900984d, []int{2, 1}
|
||||
}
|
||||
|
||||
func (m *AppProfile_SingleClusterRouting) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_AppProfile_SingleClusterRouting.Unmarshal(m, b)
|
||||
}
|
||||
func (m *AppProfile_SingleClusterRouting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_AppProfile_SingleClusterRouting.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *AppProfile_SingleClusterRouting) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_AppProfile_SingleClusterRouting.Merge(m, src)
|
||||
}
|
||||
func (m *AppProfile_SingleClusterRouting) XXX_Size() int {
|
||||
return xxx_messageInfo_AppProfile_SingleClusterRouting.Size(m)
|
||||
}
|
||||
func (m *AppProfile_SingleClusterRouting) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_AppProfile_SingleClusterRouting.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_AppProfile_SingleClusterRouting proto.InternalMessageInfo
|
||||
|
||||
func (m *AppProfile_SingleClusterRouting) GetClusterId() string {
|
||||
if m != nil {
|
||||
return m.ClusterId
|
||||
@@ -476,19 +604,22 @@ func (m *AppProfile_SingleClusterRouting) GetAllowTransactionalWrites() bool {
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Instance_State", Instance_State_name, Instance_State_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Instance_Type", Instance_Type_name, Instance_Type_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Cluster_State", Cluster_State_name, Cluster_State_value)
|
||||
proto.RegisterType((*Instance)(nil), "google.bigtable.admin.v2.Instance")
|
||||
proto.RegisterMapType((map[string]string)(nil), "google.bigtable.admin.v2.Instance.LabelsEntry")
|
||||
proto.RegisterType((*Cluster)(nil), "google.bigtable.admin.v2.Cluster")
|
||||
proto.RegisterType((*AppProfile)(nil), "google.bigtable.admin.v2.AppProfile")
|
||||
proto.RegisterType((*AppProfile_MultiClusterRoutingUseAny)(nil), "google.bigtable.admin.v2.AppProfile.MultiClusterRoutingUseAny")
|
||||
proto.RegisterType((*AppProfile_SingleClusterRouting)(nil), "google.bigtable.admin.v2.AppProfile.SingleClusterRouting")
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Instance_State", Instance_State_name, Instance_State_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Instance_Type", Instance_Type_name, Instance_Type_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Cluster_State", Cluster_State_name, Cluster_State_value)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/bigtable/admin/v2/instance.proto", fileDescriptor3) }
|
||||
func init() {
|
||||
proto.RegisterFile("google/bigtable/admin/v2/instance.proto", fileDescriptor_712127d2a900984d)
|
||||
}
|
||||
|
||||
var fileDescriptor3 = []byte{
|
||||
var fileDescriptor_712127d2a900984d = []byte{
|
||||
// 765 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x8e, 0xdb, 0x44,
|
||||
0x14, 0x8e, 0xf3, 0xb3, 0x64, 0x4f, 0xca, 0xd6, 0x1a, 0x22, 0x94, 0x86, 0x16, 0x42, 0xa4, 0xaa,
|
||||
|
391
vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/table.pb.go
generated
vendored
@@ -3,18 +3,26 @@
|
||||
|
||||
package admin
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
import google_protobuf5 "github.com/golang/protobuf/ptypes/duration"
|
||||
import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp"
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
duration "github.com/golang/protobuf/ptypes/duration"
|
||||
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Possible timestamp granularities to use when keeping multiple versions
|
||||
// of data in a table.
|
||||
type Table_TimestampGranularity int32
|
||||
@@ -31,16 +39,18 @@ var Table_TimestampGranularity_name = map[int32]string{
|
||||
0: "TIMESTAMP_GRANULARITY_UNSPECIFIED",
|
||||
1: "MILLIS",
|
||||
}
|
||||
|
||||
var Table_TimestampGranularity_value = map[string]int32{
|
||||
"TIMESTAMP_GRANULARITY_UNSPECIFIED": 0,
|
||||
"MILLIS": 1,
|
||||
"MILLIS": 1,
|
||||
}
|
||||
|
||||
func (x Table_TimestampGranularity) String() string {
|
||||
return proto.EnumName(Table_TimestampGranularity_name, int32(x))
|
||||
}
|
||||
|
||||
func (Table_TimestampGranularity) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor4, []int{0, 0}
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{0, 0}
|
||||
}
|
||||
|
||||
// Defines a view over a table's fields.
|
||||
@@ -53,12 +63,6 @@ const (
|
||||
Table_NAME_ONLY Table_View = 1
|
||||
// Only populates `name` and fields related to the table's schema.
|
||||
Table_SCHEMA_VIEW Table_View = 2
|
||||
// This is a private alpha release of Cloud Bigtable replication. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not
|
||||
// recommended for production use. It is not subject to any SLA or
|
||||
// deprecation policy.
|
||||
//
|
||||
// Only populates `name` and fields related to the table's
|
||||
// replication state.
|
||||
Table_REPLICATION_VIEW Table_View = 3
|
||||
@@ -73,6 +77,7 @@ var Table_View_name = map[int32]string{
|
||||
3: "REPLICATION_VIEW",
|
||||
4: "FULL",
|
||||
}
|
||||
|
||||
var Table_View_value = map[string]int32{
|
||||
"VIEW_UNSPECIFIED": 0,
|
||||
"NAME_ONLY": 1,
|
||||
@@ -84,7 +89,10 @@ var Table_View_value = map[string]int32{
|
||||
func (x Table_View) String() string {
|
||||
return proto.EnumName(Table_View_name, int32(x))
|
||||
}
|
||||
func (Table_View) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0, 1} }
|
||||
|
||||
func (Table_View) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{0, 1}
|
||||
}
|
||||
|
||||
// Table replication states.
|
||||
type Table_ClusterState_ReplicationState int32
|
||||
@@ -94,21 +102,17 @@ const (
|
||||
Table_ClusterState_STATE_NOT_KNOWN Table_ClusterState_ReplicationState = 0
|
||||
// The cluster was recently created, and the table must finish copying
|
||||
// over pre-existing data from other clusters before it can begin
|
||||
// receiving live replication updates and serving
|
||||
// [Data API][google.bigtable.v2.Bigtable] requests.
|
||||
// receiving live replication updates and serving Data API requests.
|
||||
Table_ClusterState_INITIALIZING Table_ClusterState_ReplicationState = 1
|
||||
// The table is temporarily unable to serve
|
||||
// [Data API][google.bigtable.v2.Bigtable] requests from this
|
||||
// The table is temporarily unable to serve Data API requests from this
|
||||
// cluster due to planned internal maintenance.
|
||||
Table_ClusterState_PLANNED_MAINTENANCE Table_ClusterState_ReplicationState = 2
|
||||
// The table is temporarily unable to serve
|
||||
// [Data API][google.bigtable.v2.Bigtable] requests from this
|
||||
// The table is temporarily unable to serve Data API requests from this
|
||||
// cluster due to unplanned or emergency maintenance.
|
||||
Table_ClusterState_UNPLANNED_MAINTENANCE Table_ClusterState_ReplicationState = 3
|
||||
// The table can serve
|
||||
// [Data API][google.bigtable.v2.Bigtable] requests from this
|
||||
// cluster. Depending on replication delay, reads may not immediately
|
||||
// reflect the state of the table in other clusters.
|
||||
// The table can serve Data API requests from this cluster. Depending on
|
||||
// replication delay, reads may not immediately reflect the state of the
|
||||
// table in other clusters.
|
||||
Table_ClusterState_READY Table_ClusterState_ReplicationState = 4
|
||||
)
|
||||
|
||||
@@ -119,6 +123,7 @@ var Table_ClusterState_ReplicationState_name = map[int32]string{
|
||||
3: "UNPLANNED_MAINTENANCE",
|
||||
4: "READY",
|
||||
}
|
||||
|
||||
var Table_ClusterState_ReplicationState_value = map[string]int32{
|
||||
"STATE_NOT_KNOWN": 0,
|
||||
"INITIALIZING": 1,
|
||||
@@ -130,8 +135,9 @@ var Table_ClusterState_ReplicationState_value = map[string]int32{
|
||||
func (x Table_ClusterState_ReplicationState) String() string {
|
||||
return proto.EnumName(Table_ClusterState_ReplicationState_name, int32(x))
|
||||
}
|
||||
|
||||
func (Table_ClusterState_ReplicationState) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor4, []int{0, 0, 0}
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{0, 0, 0}
|
||||
}
|
||||
|
||||
// Possible states of a snapshot.
|
||||
@@ -153,6 +159,7 @@ var Snapshot_State_name = map[int32]string{
|
||||
1: "READY",
|
||||
2: "CREATING",
|
||||
}
|
||||
|
||||
var Snapshot_State_value = map[string]int32{
|
||||
"STATE_NOT_KNOWN": 0,
|
||||
"READY": 1,
|
||||
@@ -162,7 +169,10 @@ var Snapshot_State_value = map[string]int32{
|
||||
func (x Snapshot_State) String() string {
|
||||
return proto.EnumName(Snapshot_State_name, int32(x))
|
||||
}
|
||||
func (Snapshot_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{3, 0} }
|
||||
|
||||
func (Snapshot_State) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{3, 0}
|
||||
}
|
||||
|
||||
// A collection of user data indexed by row, column, and timestamp.
|
||||
// Each table is served using the resources of its parent cluster.
|
||||
@@ -170,36 +180,54 @@ type Table struct {
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the table. Values are of the form
|
||||
// `projects/<project>/instances/<instance>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`.
|
||||
// Views: `NAME_ONLY`, `SCHEMA_VIEW`, `FULL`
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
// This is a private alpha release of Cloud Bigtable replication. This feature
|
||||
// is not currently available to most Cloud Bigtable customers. This feature
|
||||
// might be changed in backward-incompatible ways and is not recommended for
|
||||
// production use. It is not subject to any SLA or deprecation policy.
|
||||
//
|
||||
// Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// (`OutputOnly`)
|
||||
// Map from cluster ID to per-cluster table state.
|
||||
// If it could not be determined whether or not the table has data in a
|
||||
// particular cluster (for example, if its zone is unavailable), then
|
||||
// there will be an entry for the cluster with UNKNOWN `replication_status`.
|
||||
// Views: `FULL`
|
||||
ClusterStates map[string]*Table_ClusterState `protobuf:"bytes,2,rep,name=cluster_states,json=clusterStates" json:"cluster_states,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
// Views: `REPLICATION_VIEW`, `FULL`
|
||||
ClusterStates map[string]*Table_ClusterState `protobuf:"bytes,2,rep,name=cluster_states,json=clusterStates,proto3" json:"cluster_states,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// (`CreationOnly`)
|
||||
// The column families configured for this table, mapped by column family ID.
|
||||
// Views: `SCHEMA_VIEW`, `FULL`
|
||||
ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families,json=columnFamilies" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||
ColumnFamilies map[string]*ColumnFamily `protobuf:"bytes,3,rep,name=column_families,json=columnFamilies,proto3" json:"column_families,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
// (`CreationOnly`)
|
||||
// The granularity (e.g. `MILLIS`, `MICROS`) at which timestamps are stored in
|
||||
// The granularity (i.e. `MILLIS`) at which timestamps are stored in
|
||||
// this table. Timestamps not matching the granularity will be rejected.
|
||||
// If unspecified at creation time, the value will be set to `MILLIS`.
|
||||
// Views: `SCHEMA_VIEW`, `FULL`
|
||||
Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,enum=google.bigtable.admin.v2.Table_TimestampGranularity" json:"granularity,omitempty"`
|
||||
Granularity Table_TimestampGranularity `protobuf:"varint,4,opt,name=granularity,proto3,enum=google.bigtable.admin.v2.Table_TimestampGranularity" json:"granularity,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Table) Reset() { *m = Table{} }
|
||||
func (m *Table) String() string { return proto.CompactTextString(m) }
|
||||
func (*Table) ProtoMessage() {}
|
||||
func (*Table) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} }
|
||||
func (m *Table) Reset() { *m = Table{} }
|
||||
func (m *Table) String() string { return proto.CompactTextString(m) }
|
||||
func (*Table) ProtoMessage() {}
|
||||
func (*Table) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{0}
|
||||
}
|
||||
|
||||
func (m *Table) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Table.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Table) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Table.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Table) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Table.Merge(m, src)
|
||||
}
|
||||
func (m *Table) XXX_Size() int {
|
||||
return xxx_messageInfo_Table.Size(m)
|
||||
}
|
||||
func (m *Table) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Table.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Table proto.InternalMessageInfo
|
||||
|
||||
func (m *Table) GetName() string {
|
||||
if m != nil {
|
||||
@@ -229,22 +257,40 @@ func (m *Table) GetGranularity() Table_TimestampGranularity {
|
||||
return Table_TIMESTAMP_GRANULARITY_UNSPECIFIED
|
||||
}
|
||||
|
||||
// This is a private alpha release of Cloud Bigtable replication. This feature
|
||||
// is not currently available to most Cloud Bigtable customers. This feature
|
||||
// might be changed in backward-incompatible ways and is not recommended for
|
||||
// production use. It is not subject to any SLA or deprecation policy.
|
||||
//
|
||||
// The state of a table's data in a particular cluster.
|
||||
type Table_ClusterState struct {
|
||||
// (`OutputOnly`)
|
||||
// The state of replication for the table in this cluster.
|
||||
ReplicationState Table_ClusterState_ReplicationState `protobuf:"varint,1,opt,name=replication_state,json=replicationState,enum=google.bigtable.admin.v2.Table_ClusterState_ReplicationState" json:"replication_state,omitempty"`
|
||||
ReplicationState Table_ClusterState_ReplicationState `protobuf:"varint,1,opt,name=replication_state,json=replicationState,proto3,enum=google.bigtable.admin.v2.Table_ClusterState_ReplicationState" json:"replication_state,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Table_ClusterState) Reset() { *m = Table_ClusterState{} }
|
||||
func (m *Table_ClusterState) String() string { return proto.CompactTextString(m) }
|
||||
func (*Table_ClusterState) ProtoMessage() {}
|
||||
func (*Table_ClusterState) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0, 0} }
|
||||
func (m *Table_ClusterState) Reset() { *m = Table_ClusterState{} }
|
||||
func (m *Table_ClusterState) String() string { return proto.CompactTextString(m) }
|
||||
func (*Table_ClusterState) ProtoMessage() {}
|
||||
func (*Table_ClusterState) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{0, 0}
|
||||
}
|
||||
|
||||
func (m *Table_ClusterState) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Table_ClusterState.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Table_ClusterState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Table_ClusterState.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Table_ClusterState) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Table_ClusterState.Merge(m, src)
|
||||
}
|
||||
func (m *Table_ClusterState) XXX_Size() int {
|
||||
return xxx_messageInfo_Table_ClusterState.Size(m)
|
||||
}
|
||||
func (m *Table_ClusterState) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Table_ClusterState.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Table_ClusterState proto.InternalMessageInfo
|
||||
|
||||
func (m *Table_ClusterState) GetReplicationState() Table_ClusterState_ReplicationState {
|
||||
if m != nil {
|
||||
@@ -261,13 +307,36 @@ type ColumnFamily struct {
|
||||
// NOTE: Garbage collection executes opportunistically in the background, and
|
||||
// so it's possible for reads to return a cell even if it matches the active
|
||||
// GC expression for its family.
|
||||
GcRule *GcRule `protobuf:"bytes,1,opt,name=gc_rule,json=gcRule" json:"gc_rule,omitempty"`
|
||||
GcRule *GcRule `protobuf:"bytes,1,opt,name=gc_rule,json=gcRule,proto3" json:"gc_rule,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ColumnFamily) Reset() { *m = ColumnFamily{} }
|
||||
func (m *ColumnFamily) String() string { return proto.CompactTextString(m) }
|
||||
func (*ColumnFamily) ProtoMessage() {}
|
||||
func (*ColumnFamily) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} }
|
||||
func (m *ColumnFamily) Reset() { *m = ColumnFamily{} }
|
||||
func (m *ColumnFamily) String() string { return proto.CompactTextString(m) }
|
||||
func (*ColumnFamily) ProtoMessage() {}
|
||||
func (*ColumnFamily) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{1}
|
||||
}
|
||||
|
||||
func (m *ColumnFamily) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ColumnFamily.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ColumnFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ColumnFamily.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ColumnFamily) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ColumnFamily.Merge(m, src)
|
||||
}
|
||||
func (m *ColumnFamily) XXX_Size() int {
|
||||
return xxx_messageInfo_ColumnFamily.Size(m)
|
||||
}
|
||||
func (m *ColumnFamily) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ColumnFamily.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ColumnFamily proto.InternalMessageInfo
|
||||
|
||||
func (m *ColumnFamily) GetGcRule() *GcRule {
|
||||
if m != nil {
|
||||
@@ -285,35 +354,64 @@ type GcRule struct {
// *GcRule_MaxAge
// *GcRule_Intersection_
// *GcRule_Union_
Rule isGcRule_Rule `protobuf_oneof:"rule"`
Rule isGcRule_Rule `protobuf_oneof:"rule"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}

func (m *GcRule) Reset() { *m = GcRule{} }
func (m *GcRule) String() string { return proto.CompactTextString(m) }
func (*GcRule) ProtoMessage() {}
func (*GcRule) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} }
func (m *GcRule) Reset() { *m = GcRule{} }
func (m *GcRule) String() string { return proto.CompactTextString(m) }
func (*GcRule) ProtoMessage() {}
func (*GcRule) Descriptor() ([]byte, []int) {
return fileDescriptor_ea1666be9e81bb35, []int{2}
}

func (m *GcRule) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GcRule.Unmarshal(m, b)
}
func (m *GcRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GcRule.Marshal(b, m, deterministic)
}
func (m *GcRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_GcRule.Merge(m, src)
}
func (m *GcRule) XXX_Size() int {
return xxx_messageInfo_GcRule.Size(m)
}
func (m *GcRule) XXX_DiscardUnknown() {
xxx_messageInfo_GcRule.DiscardUnknown(m)
}

var xxx_messageInfo_GcRule proto.InternalMessageInfo

type isGcRule_Rule interface {
isGcRule_Rule()
}
|
||||
|
||||
type GcRule_MaxNumVersions struct {
|
||||
MaxNumVersions int32 `protobuf:"varint,1,opt,name=max_num_versions,json=maxNumVersions,oneof"`
|
||||
MaxNumVersions int32 `protobuf:"varint,1,opt,name=max_num_versions,json=maxNumVersions,proto3,oneof"`
|
||||
}
|
||||
|
||||
type GcRule_MaxAge struct {
|
||||
MaxAge *google_protobuf5.Duration `protobuf:"bytes,2,opt,name=max_age,json=maxAge,oneof"`
|
||||
MaxAge *duration.Duration `protobuf:"bytes,2,opt,name=max_age,json=maxAge,proto3,oneof"`
|
||||
}
|
||||
|
||||
type GcRule_Intersection_ struct {
|
||||
Intersection *GcRule_Intersection `protobuf:"bytes,3,opt,name=intersection,oneof"`
|
||||
Intersection *GcRule_Intersection `protobuf:"bytes,3,opt,name=intersection,proto3,oneof"`
|
||||
}
|
||||
|
||||
type GcRule_Union_ struct {
|
||||
Union *GcRule_Union `protobuf:"bytes,4,opt,name=union,oneof"`
|
||||
Union *GcRule_Union `protobuf:"bytes,4,opt,name=union,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*GcRule_MaxNumVersions) isGcRule_Rule() {}
|
||||
func (*GcRule_MaxAge) isGcRule_Rule() {}
|
||||
func (*GcRule_Intersection_) isGcRule_Rule() {}
|
||||
func (*GcRule_Union_) isGcRule_Rule() {}
|
||||
|
||||
func (*GcRule_MaxAge) isGcRule_Rule() {}
|
||||
|
||||
func (*GcRule_Intersection_) isGcRule_Rule() {}
|
||||
|
||||
func (*GcRule_Union_) isGcRule_Rule() {}
|
||||
|
||||
func (m *GcRule) GetRule() isGcRule_Rule {
|
||||
if m != nil {
|
||||
@@ -329,7 +427,7 @@ func (m *GcRule) GetMaxNumVersions() int32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *GcRule) GetMaxAge() *google_protobuf5.Duration {
|
||||
func (m *GcRule) GetMaxAge() *duration.Duration {
|
||||
if x, ok := m.GetRule().(*GcRule_MaxAge); ok {
|
||||
return x.MaxAge
|
||||
}
|
||||
@@ -403,7 +501,7 @@ func _GcRule_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer)
|
||||
if wire != proto.WireBytes {
|
||||
return true, proto.ErrInternalBadWireType
|
||||
}
|
||||
msg := new(google_protobuf5.Duration)
|
||||
msg := new(duration.Duration)
|
||||
err := b.DecodeMessage(msg)
|
||||
m.Rule = &GcRule_MaxAge{msg}
|
||||
return true, err
|
||||
@@ -433,21 +531,21 @@ func _GcRule_OneofSizer(msg proto.Message) (n int) {
|
||||
// rule
|
||||
switch x := m.Rule.(type) {
|
||||
case *GcRule_MaxNumVersions:
|
||||
n += proto.SizeVarint(1<<3 | proto.WireVarint)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(x.MaxNumVersions))
|
||||
case *GcRule_MaxAge:
|
||||
s := proto.Size(x.MaxAge)
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *GcRule_Intersection_:
|
||||
s := proto.Size(x.Intersection)
|
||||
n += proto.SizeVarint(3<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *GcRule_Union_:
|
||||
s := proto.Size(x.Union)
|
||||
n += proto.SizeVarint(4<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
@@ -460,13 +558,36 @@ func _GcRule_OneofSizer(msg proto.Message) (n int) {
|
||||
// A GcRule which deletes cells matching all of the given rules.
|
||||
type GcRule_Intersection struct {
|
||||
// Only delete cells which would be deleted by every element of `rules`.
|
||||
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
|
||||
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GcRule_Intersection) Reset() { *m = GcRule_Intersection{} }
|
||||
func (m *GcRule_Intersection) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule_Intersection) ProtoMessage() {}
|
||||
func (*GcRule_Intersection) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2, 0} }
|
||||
func (m *GcRule_Intersection) Reset() { *m = GcRule_Intersection{} }
|
||||
func (m *GcRule_Intersection) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule_Intersection) ProtoMessage() {}
|
||||
func (*GcRule_Intersection) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{2, 0}
|
||||
}
|
||||
|
||||
func (m *GcRule_Intersection) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GcRule_Intersection.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GcRule_Intersection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GcRule_Intersection.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *GcRule_Intersection) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GcRule_Intersection.Merge(m, src)
|
||||
}
|
||||
func (m *GcRule_Intersection) XXX_Size() int {
|
||||
return xxx_messageInfo_GcRule_Intersection.Size(m)
|
||||
}
|
||||
func (m *GcRule_Intersection) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GcRule_Intersection.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GcRule_Intersection proto.InternalMessageInfo
|
||||
|
||||
func (m *GcRule_Intersection) GetRules() []*GcRule {
|
||||
if m != nil {
|
||||
@@ -478,13 +599,36 @@ func (m *GcRule_Intersection) GetRules() []*GcRule {
|
||||
// A GcRule which deletes cells matching any of the given rules.
|
||||
type GcRule_Union struct {
|
||||
// Delete cells which would be deleted by any element of `rules`.
|
||||
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules" json:"rules,omitempty"`
|
||||
Rules []*GcRule `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GcRule_Union) Reset() { *m = GcRule_Union{} }
|
||||
func (m *GcRule_Union) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule_Union) ProtoMessage() {}
|
||||
func (*GcRule_Union) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2, 1} }
|
||||
func (m *GcRule_Union) Reset() { *m = GcRule_Union{} }
|
||||
func (m *GcRule_Union) String() string { return proto.CompactTextString(m) }
|
||||
func (*GcRule_Union) ProtoMessage() {}
|
||||
func (*GcRule_Union) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{2, 1}
|
||||
}
|
||||
|
||||
func (m *GcRule_Union) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GcRule_Union.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GcRule_Union) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GcRule_Union.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *GcRule_Union) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GcRule_Union.Merge(m, src)
|
||||
}
|
||||
func (m *GcRule_Union) XXX_Size() int {
|
||||
return xxx_messageInfo_GcRule_Union.Size(m)
|
||||
}
|
||||
func (m *GcRule_Union) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GcRule_Union.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GcRule_Union proto.InternalMessageInfo
|
||||
|
||||
func (m *GcRule_Union) GetRules() []*GcRule {
|
||||
if m != nil {
|
||||
@@ -493,47 +637,70 @@ func (m *GcRule_Union) GetRules() []*GcRule {
|
||||
return nil
|
||||
}

// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
//
// A snapshot of a table at a particular time. A snapshot can be used as a
// checkpoint for data restoration or a data source for a new table.
//
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
// feature is not currently available to most Cloud Bigtable customers. This
// feature might be changed in backward-incompatible ways and is not recommended
// for production use. It is not subject to any SLA or deprecation policy.
type Snapshot struct {
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the snapshot.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/clusters/<cluster>/snapshots/<snapshot>`.
|
||||
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// (`OutputOnly`)
|
||||
// The source table at the time the snapshot was taken.
|
||||
SourceTable *Table `protobuf:"bytes,2,opt,name=source_table,json=sourceTable" json:"source_table,omitempty"`
|
||||
SourceTable *Table `protobuf:"bytes,2,opt,name=source_table,json=sourceTable,proto3" json:"source_table,omitempty"`
|
||||
// (`OutputOnly`)
|
||||
// The size of the data in the source table at the time the snapshot was
|
||||
// taken. In some cases, this value may be computed asynchronously via a
|
||||
// background process and a placeholder of 0 will be used in the meantime.
|
||||
DataSizeBytes int64 `protobuf:"varint,3,opt,name=data_size_bytes,json=dataSizeBytes" json:"data_size_bytes,omitempty"`
|
||||
DataSizeBytes int64 `protobuf:"varint,3,opt,name=data_size_bytes,json=dataSizeBytes,proto3" json:"data_size_bytes,omitempty"`
|
||||
// (`OutputOnly`)
|
||||
// The time when the snapshot is created.
|
||||
CreateTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"`
|
||||
CreateTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
|
||||
// (`OutputOnly`)
|
||||
// The time when the snapshot will be deleted. The maximum amount of time a
|
||||
// snapshot can stay active is 365 days. If 'ttl' is not specified,
|
||||
// the default maximum of 365 days will be used.
|
||||
DeleteTime *google_protobuf1.Timestamp `protobuf:"bytes,5,opt,name=delete_time,json=deleteTime" json:"delete_time,omitempty"`
|
||||
DeleteTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
|
||||
// (`OutputOnly`)
|
||||
// The current state of the snapshot.
|
||||
State Snapshot_State `protobuf:"varint,6,opt,name=state,enum=google.bigtable.admin.v2.Snapshot_State" json:"state,omitempty"`
|
||||
State Snapshot_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.bigtable.admin.v2.Snapshot_State" json:"state,omitempty"`
|
||||
// (`OutputOnly`)
|
||||
// Description of the snapshot.
|
||||
Description string `protobuf:"bytes,7,opt,name=description" json:"description,omitempty"`
|
||||
Description string `protobuf:"bytes,7,opt,name=description,proto3" json:"description,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Snapshot) Reset() { *m = Snapshot{} }
|
||||
func (m *Snapshot) String() string { return proto.CompactTextString(m) }
|
||||
func (*Snapshot) ProtoMessage() {}
|
||||
func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} }
|
||||
func (m *Snapshot) Reset() { *m = Snapshot{} }
|
||||
func (m *Snapshot) String() string { return proto.CompactTextString(m) }
|
||||
func (*Snapshot) ProtoMessage() {}
|
||||
func (*Snapshot) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_ea1666be9e81bb35, []int{3}
|
||||
}
|
||||
|
||||
func (m *Snapshot) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Snapshot.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Snapshot) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Snapshot.Merge(m, src)
|
||||
}
|
||||
func (m *Snapshot) XXX_Size() int {
|
||||
return xxx_messageInfo_Snapshot.Size(m)
|
||||
}
|
||||
func (m *Snapshot) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Snapshot.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Snapshot proto.InternalMessageInfo
|
||||
|
||||
func (m *Snapshot) GetName() string {
|
||||
if m != nil {
|
||||
@@ -556,14 +723,14 @@ func (m *Snapshot) GetDataSizeBytes() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Snapshot) GetCreateTime() *google_protobuf1.Timestamp {
|
||||
func (m *Snapshot) GetCreateTime() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.CreateTime
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Snapshot) GetDeleteTime() *google_protobuf1.Timestamp {
|
||||
func (m *Snapshot) GetDeleteTime() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.DeleteTime
|
||||
}
|
||||
@@ -585,22 +752,26 @@ func (m *Snapshot) GetDescription() string {
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Table_View", Table_View_name, Table_View_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Table_ClusterState_ReplicationState", Table_ClusterState_ReplicationState_name, Table_ClusterState_ReplicationState_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Snapshot_State", Snapshot_State_name, Snapshot_State_value)
|
||||
proto.RegisterType((*Table)(nil), "google.bigtable.admin.v2.Table")
|
||||
proto.RegisterMapType((map[string]*Table_ClusterState)(nil), "google.bigtable.admin.v2.Table.ClusterStatesEntry")
|
||||
proto.RegisterMapType((map[string]*ColumnFamily)(nil), "google.bigtable.admin.v2.Table.ColumnFamiliesEntry")
|
||||
proto.RegisterType((*Table_ClusterState)(nil), "google.bigtable.admin.v2.Table.ClusterState")
|
||||
proto.RegisterType((*ColumnFamily)(nil), "google.bigtable.admin.v2.ColumnFamily")
|
||||
proto.RegisterType((*GcRule)(nil), "google.bigtable.admin.v2.GcRule")
|
||||
proto.RegisterType((*GcRule_Intersection)(nil), "google.bigtable.admin.v2.GcRule.Intersection")
|
||||
proto.RegisterType((*GcRule_Union)(nil), "google.bigtable.admin.v2.GcRule.Union")
|
||||
proto.RegisterType((*Snapshot)(nil), "google.bigtable.admin.v2.Snapshot")
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Table_TimestampGranularity", Table_TimestampGranularity_name, Table_TimestampGranularity_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Table_View", Table_View_name, Table_View_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Table_ClusterState_ReplicationState", Table_ClusterState_ReplicationState_name, Table_ClusterState_ReplicationState_value)
|
||||
proto.RegisterEnum("google.bigtable.admin.v2.Snapshot_State", Snapshot_State_name, Snapshot_State_value)
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/bigtable/admin/v2/table.proto", fileDescriptor4) }
|
||||
func init() {
|
||||
proto.RegisterFile("google/bigtable/admin/v2/table.proto", fileDescriptor_ea1666be9e81bb35)
|
||||
}
|
||||
|
||||
var fileDescriptor4 = []byte{
|
||||
var fileDescriptor_ea1666be9e81bb35 = []byte{
|
||||
// 965 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xff, 0x6e, 0xdb, 0x54,
|
||||
0x18, 0xad, 0xe3, 0x38, 0x6d, 0xbf, 0xa4, 0xad, 0xb9, 0x1d, 0x22, 0x8b, 0xa6, 0x2d, 0x44, 0x30,
|
||||
|
1057 vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_data.pb.go generated vendored (file diff suppressed because it is too large)
127 vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service.pb.go generated vendored
@@ -3,15 +3,14 @@

package bigtable

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"

import (
context "golang.org/x/net/context"
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
empty "github.com/golang/protobuf/ptypes/empty"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
math "math"
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
@@ -19,6 +18,53 @@ var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/bigtable/v1/bigtable_service.proto", fileDescriptor_a9a2f3a013379609)
|
||||
}
|
||||
|
||||
var fileDescriptor_a9a2f3a013379609 = []byte{
|
||||
// 521 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcd, 0x6e, 0xd4, 0x30,
|
||||
0x10, 0xc7, 0x65, 0x0e, 0xa8, 0x58, 0x42, 0x08, 0x4b, 0x14, 0x69, 0xe1, 0x14, 0xa0, 0xa2, 0x11,
|
||||
0x8d, 0xdb, 0x72, 0x0b, 0xe2, 0xd0, 0x45, 0x50, 0x21, 0x58, 0x51, 0xa5, 0xe2, 0x43, 0xe5, 0xb0,
|
||||
0x78, 0x93, 0x69, 0x08, 0x4d, 0xe2, 0x60, 0x7b, 0x37, 0x5a, 0xaa, 0x5e, 0x38, 0x71, 0xe7, 0x11,
|
||||
0x10, 0x17, 0x5e, 0x80, 0x23, 0xef, 0x00, 0x67, 0x6e, 0x3c, 0x08, 0xb2, 0x63, 0x2f, 0x2c, 0x0d,
|
||||
0xcb, 0x8a, 0xee, 0x29, 0x8e, 0xe6, 0x3f, 0x33, 0xbf, 0xff, 0xf8, 0x03, 0xaf, 0xa6, 0x9c, 0xa7,
|
||||
0x39, 0xd0, 0x41, 0x96, 0x2a, 0x36, 0xc8, 0x81, 0x8e, 0x36, 0x26, 0xeb, 0xbe, 0x04, 0x31, 0xca,
|
||||
0x62, 0x08, 0x2a, 0xc1, 0x15, 0x27, 0xa4, 0x91, 0x06, 0x2e, 0x1c, 0x8c, 0x36, 0x3a, 0x97, 0x6d,
|
||||
0x3a, 0xab, 0x32, 0xca, 0xca, 0x92, 0x2b, 0xa6, 0x32, 0x5e, 0xca, 0x26, 0xa3, 0xb3, 0x32, 0xab,
|
||||
0x78, 0xc2, 0x14, 0xb3, 0xba, 0xcd, 0x39, 0x20, 0xfa, 0x05, 0x48, 0xc9, 0x52, 0x70, 0xb5, 0x2f,
|
||||
0xd9, 0x1c, 0xf3, 0x37, 0x18, 0xee, 0x53, 0x28, 0x2a, 0x35, 0x6e, 0x82, 0x9b, 0xdf, 0x97, 0xf0,
|
||||
0xb9, 0xae, 0x2d, 0xb0, 0xdb, 0xe4, 0x93, 0x8f, 0x08, 0x2f, 0x45, 0xc0, 0x92, 0x88, 0xd7, 0x92,
|
||||
0x5c, 0x09, 0x8e, 0x9b, 0x09, 0x5c, 0x34, 0x82, 0xd7, 0x43, 0x90, 0xaa, 0x73, 0x75, 0xb6, 0x48,
|
||||
0x56, 0xbc, 0x94, 0xe0, 0x3d, 0x7c, 0xfb, 0xed, 0xc7, 0xfb, 0x53, 0xf7, 0xbc, 0x2d, 0x4d, 0x7d,
|
||||
0xd8, 0x30, 0x97, 0xac, 0x80, 0xdb, 0x95, 0xe0, 0xaf, 0x20, 0x56, 0x92, 0xfa, 0xf4, 0x0d, 0x2f,
|
||||
0x41, 0x7f, 0xe3, 0x7c, 0x28, 0x15, 0x08, 0xbd, 0x34, 0x42, 0x49, 0xfd, 0x23, 0x2a, 0x78, 0x2d,
|
||||
0x43, 0x01, 0x2c, 0x09, 0x91, 0xbf, 0x8e, 0xc8, 0x67, 0x84, 0xcf, 0xee, 0xb2, 0xa2, 0xca, 0x21,
|
||||
0xe2, 0xf5, 0x03, 0x18, 0x4b, 0x72, 0xbd, 0x8d, 0x63, 0x4a, 0xe2, 0x88, 0x57, 0xe7, 0x50, 0x5a,
|
||||
0xec, 0x47, 0x06, 0xfb, 0x3e, 0xd9, 0x3e, 0x11, 0xb6, 0x34, 0xb5, 0x75, 0xe1, 0x75, 0x44, 0x3e,
|
||||
0x20, 0x7c, 0xa6, 0x37, 0x54, 0x4c, 0xe9, 0x66, 0xa4, 0x75, 0x7a, 0x93, 0xb0, 0x23, 0x5e, 0x76,
|
||||
0x2a, 0xb7, 0x8f, 0xc1, 0x5d, 0xbd, 0x8f, 0xde, 0x33, 0x83, 0x17, 0x79, 0xbd, 0x93, 0xe0, 0xd1,
|
||||
0x43, 0xc1, 0xeb, 0xfe, 0x01, 0x8c, 0x8f, 0xc2, 0xc2, 0x34, 0x0e, 0x91, 0x4f, 0x3e, 0x21, 0x8c,
|
||||
0x27, 0x18, 0x92, 0x5c, 0x9b, 0x89, 0x39, 0x99, 0xec, 0xca, 0xbf, 0x64, 0x76, 0xac, 0x3d, 0xc3,
|
||||
0xbd, 0xed, 0x75, 0xff, 0x93, 0xdb, 0x82, 0xea, 0x9a, 0x1a, 0xf6, 0x2b, 0xc2, 0xe7, 0xef, 0xbc,
|
||||
0x84, 0xf8, 0x60, 0xab, 0x4c, 0x7e, 0x8d, 0xf6, 0x46, 0x1b, 0xcc, 0x31, 0x99, 0x43, 0x5f, 0x9b,
|
||||
0x53, 0x6d, 0x1d, 0xbc, 0x30, 0x0e, 0xf6, 0xbc, 0xc7, 0x0b, 0x9a, 0x7c, 0x3c, 0xd5, 0x49, 0x9b,
|
||||
0xfa, 0x82, 0x30, 0xd1, 0xd7, 0xa8, 0xc7, 0x93, 0x6c, 0x7f, 0xfc, 0x54, 0x64, 0x8d, 0xab, 0xb5,
|
||||
0xbf, 0x5d, 0xb7, 0x69, 0x9d, 0xb3, 0x75, 0xb1, 0x55, 0xce, 0x6b, 0x8f, 0x19, 0x03, 0xcf, 0xbd,
|
||||
0x27, 0x0b, 0x32, 0x20, 0xa6, 0x11, 0x42, 0xe4, 0x77, 0x2b, 0xbc, 0x1c, 0xf3, 0xa2, 0x05, 0xa0,
|
||||
0x7b, 0xe1, 0x8f, 0x67, 0x47, 0xee, 0xe8, 0x73, 0xbd, 0x83, 0xf6, 0x42, 0x2b, 0x4e, 0x79, 0xce,
|
||||
0xca, 0x34, 0xe0, 0x22, 0xa5, 0x29, 0x94, 0xe6, 0xd4, 0xd3, 0x26, 0xc4, 0xaa, 0x4c, 0xfe, 0xfe,
|
||||
0x04, 0xde, 0x72, 0xeb, 0x77, 0x08, 0x0d, 0x4e, 0x1b, 0xe5, 0xcd, 0x9f, 0x01, 0x00, 0x00, 0xff,
|
||||
0xff, 0x4c, 0x27, 0x6e, 0x9a, 0xb0, 0x05, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
@@ -27,8 +73,9 @@ var _ grpc.ClientConn
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4

// Client API for BigtableService service

// BigtableServiceClient is the client API for BigtableService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BigtableServiceClient interface {
// Streams back the contents of all requested rows, optionally applying
// the same Reader filter to each. Depending on their size, rows may be
|
||||
@@ -42,7 +89,7 @@ type BigtableServiceClient interface {
|
||||
SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error)
|
||||
// Mutates a row atomically. Cells already present in the row are left
|
||||
// unchanged unless explicitly changed by 'mutation'.
|
||||
MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
|
||||
MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*empty.Empty, error)
|
||||
// Mutates multiple rows in a batch. Each individual row is mutated
|
||||
// atomically as in MutateRow, but the entire batch is not executed
|
||||
// atomically.
|
||||
@@ -65,7 +112,7 @@ func NewBigtableServiceClient(cc *grpc.ClientConn) BigtableServiceClient {
|
||||
}
|
||||
|
||||
func (c *bigtableServiceClient) ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigtableService_ReadRowsClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[0], c.cc, "/google.bigtable.v1.BigtableService/ReadRows", opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &_BigtableService_serviceDesc.Streams[0], "/google.bigtable.v1.BigtableService/ReadRows", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -97,7 +144,7 @@ func (x *bigtableServiceReadRowsClient) Recv() (*ReadRowsResponse, error) {
|
||||
}
|
||||
|
||||
func (c *bigtableServiceClient) SampleRowKeys(ctx context.Context, in *SampleRowKeysRequest, opts ...grpc.CallOption) (BigtableService_SampleRowKeysClient, error) {
|
||||
stream, err := grpc.NewClientStream(ctx, &_BigtableService_serviceDesc.Streams[1], c.cc, "/google.bigtable.v1.BigtableService/SampleRowKeys", opts...)
|
||||
stream, err := c.cc.NewStream(ctx, &_BigtableService_serviceDesc.Streams[1], "/google.bigtable.v1.BigtableService/SampleRowKeys", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -128,9 +175,9 @@ func (x *bigtableServiceSampleRowKeysClient) Recv() (*SampleRowKeysResponse, err
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (c *bigtableServiceClient) MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
|
||||
out := new(google_protobuf2.Empty)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/MutateRow", in, out, c.cc, opts...)
|
||||
func (c *bigtableServiceClient) MutateRow(ctx context.Context, in *MutateRowRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.v1.BigtableService/MutateRow", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -139,7 +186,7 @@ func (c *bigtableServiceClient) MutateRow(ctx context.Context, in *MutateRowRequ
|
||||
|
||||
func (c *bigtableServiceClient) MutateRows(ctx context.Context, in *MutateRowsRequest, opts ...grpc.CallOption) (*MutateRowsResponse, error) {
|
||||
out := new(MutateRowsResponse)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/MutateRows", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.v1.BigtableService/MutateRows", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -148,7 +195,7 @@ func (c *bigtableServiceClient) MutateRows(ctx context.Context, in *MutateRowsRe
|
||||
|
||||
func (c *bigtableServiceClient) CheckAndMutateRow(ctx context.Context, in *CheckAndMutateRowRequest, opts ...grpc.CallOption) (*CheckAndMutateRowResponse, error) {
|
||||
out := new(CheckAndMutateRowResponse)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/CheckAndMutateRow", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.v1.BigtableService/CheckAndMutateRow", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -157,15 +204,14 @@ func (c *bigtableServiceClient) CheckAndMutateRow(ctx context.Context, in *Check
|
||||
|
||||
func (c *bigtableServiceClient) ReadModifyWriteRow(ctx context.Context, in *ReadModifyWriteRowRequest, opts ...grpc.CallOption) (*Row, error) {
|
||||
out := new(Row)
|
||||
err := grpc.Invoke(ctx, "/google.bigtable.v1.BigtableService/ReadModifyWriteRow", in, out, c.cc, opts...)
|
||||
err := c.cc.Invoke(ctx, "/google.bigtable.v1.BigtableService/ReadModifyWriteRow", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for BigtableService service
|
||||
|
||||
// BigtableServiceServer is the server API for BigtableService service.
|
||||
type BigtableServiceServer interface {
|
||||
// Streams back the contents of all requested rows, optionally applying
|
||||
// the same Reader filter to each. Depending on their size, rows may be
|
||||
@@ -179,7 +225,7 @@ type BigtableServiceServer interface {
|
||||
SampleRowKeys(*SampleRowKeysRequest, BigtableService_SampleRowKeysServer) error
|
||||
// Mutates a row atomically. Cells already present in the row are left
|
||||
// unchanged unless explicitly changed by 'mutation'.
|
||||
MutateRow(context.Context, *MutateRowRequest) (*google_protobuf2.Empty, error)
|
||||
MutateRow(context.Context, *MutateRowRequest) (*empty.Empty, error)
|
||||
// Mutates multiple rows in a batch. Each individual row is mutated
|
||||
// atomically as in MutateRow, but the entire batch is not executed
|
||||
// atomically.
|
||||
@@ -346,42 +392,3 @@ var _BigtableService_serviceDesc = grpc.ServiceDesc{
|
||||
},
|
||||
Metadata: "google/bigtable/v1/bigtable_service.proto",
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/bigtable/v1/bigtable_service.proto", fileDescriptor1) }
|
||||
|
||||
var fileDescriptor1 = []byte{
|
||||
// 521 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcd, 0x6e, 0xd4, 0x30,
|
||||
0x10, 0xc7, 0x65, 0x0e, 0xa8, 0x58, 0x42, 0x08, 0x4b, 0x14, 0x69, 0xe1, 0x14, 0xa0, 0xa2, 0x11,
|
||||
0x8d, 0xdb, 0x72, 0x0b, 0xe2, 0xd0, 0x45, 0x50, 0x21, 0x58, 0x51, 0xa5, 0xe2, 0x43, 0xe5, 0xb0,
|
||||
0x78, 0x93, 0x69, 0x08, 0x4d, 0xe2, 0x60, 0x7b, 0x37, 0x5a, 0xaa, 0x5e, 0x38, 0x71, 0xe7, 0x11,
|
||||
0x10, 0x17, 0x5e, 0x80, 0x23, 0xef, 0x00, 0x67, 0x6e, 0x3c, 0x08, 0xb2, 0x63, 0x2f, 0x2c, 0x0d,
|
||||
0xcb, 0x8a, 0xee, 0x29, 0x8e, 0xe6, 0x3f, 0x33, 0xbf, 0xff, 0xf8, 0x03, 0xaf, 0xa6, 0x9c, 0xa7,
|
||||
0x39, 0xd0, 0x41, 0x96, 0x2a, 0x36, 0xc8, 0x81, 0x8e, 0x36, 0x26, 0xeb, 0xbe, 0x04, 0x31, 0xca,
|
||||
0x62, 0x08, 0x2a, 0xc1, 0x15, 0x27, 0xa4, 0x91, 0x06, 0x2e, 0x1c, 0x8c, 0x36, 0x3a, 0x97, 0x6d,
|
||||
0x3a, 0xab, 0x32, 0xca, 0xca, 0x92, 0x2b, 0xa6, 0x32, 0x5e, 0xca, 0x26, 0xa3, 0xb3, 0x32, 0xab,
|
||||
0x78, 0xc2, 0x14, 0xb3, 0xba, 0xcd, 0x39, 0x20, 0xfa, 0x05, 0x48, 0xc9, 0x52, 0x70, 0xb5, 0x2f,
|
||||
0xd9, 0x1c, 0xf3, 0x37, 0x18, 0xee, 0x53, 0x28, 0x2a, 0x35, 0x6e, 0x82, 0x9b, 0xdf, 0x97, 0xf0,
|
||||
0xb9, 0xae, 0x2d, 0xb0, 0xdb, 0xe4, 0x93, 0x8f, 0x08, 0x2f, 0x45, 0xc0, 0x92, 0x88, 0xd7, 0x92,
|
||||
0x5c, 0x09, 0x8e, 0x9b, 0x09, 0x5c, 0x34, 0x82, 0xd7, 0x43, 0x90, 0xaa, 0x73, 0x75, 0xb6, 0x48,
|
||||
0x56, 0xbc, 0x94, 0xe0, 0x3d, 0x7c, 0xfb, 0xed, 0xc7, 0xfb, 0x53, 0xf7, 0xbc, 0x2d, 0x4d, 0x7d,
|
||||
0xd8, 0x30, 0x97, 0xac, 0x80, 0xdb, 0x95, 0xe0, 0xaf, 0x20, 0x56, 0x92, 0xfa, 0xf4, 0x0d, 0x2f,
|
||||
0x41, 0x7f, 0xe3, 0x7c, 0x28, 0x15, 0x08, 0xbd, 0x34, 0x42, 0x49, 0xfd, 0x23, 0x2a, 0x78, 0x2d,
|
||||
0x43, 0x01, 0x2c, 0x09, 0x91, 0xbf, 0x8e, 0xc8, 0x67, 0x84, 0xcf, 0xee, 0xb2, 0xa2, 0xca, 0x21,
|
||||
0xe2, 0xf5, 0x03, 0x18, 0x4b, 0x72, 0xbd, 0x8d, 0x63, 0x4a, 0xe2, 0x88, 0x57, 0xe7, 0x50, 0x5a,
|
||||
0xec, 0x47, 0x06, 0xfb, 0x3e, 0xd9, 0x3e, 0x11, 0xb6, 0x34, 0xb5, 0x75, 0xe1, 0x75, 0x44, 0x3e,
|
||||
0x20, 0x7c, 0xa6, 0x37, 0x54, 0x4c, 0xe9, 0x66, 0xa4, 0x75, 0x7a, 0x93, 0xb0, 0x23, 0x5e, 0x76,
|
||||
0x2a, 0xb7, 0x8f, 0xc1, 0x5d, 0xbd, 0x8f, 0xde, 0x33, 0x83, 0x17, 0x79, 0xbd, 0x93, 0xe0, 0xd1,
|
||||
0x43, 0xc1, 0xeb, 0xfe, 0x01, 0x8c, 0x8f, 0xc2, 0xc2, 0x34, 0x0e, 0x91, 0x4f, 0x3e, 0x21, 0x8c,
|
||||
0x27, 0x18, 0x92, 0x5c, 0x9b, 0x89, 0x39, 0x99, 0xec, 0xca, 0xbf, 0x64, 0x76, 0xac, 0x3d, 0xc3,
|
||||
0xbd, 0xed, 0x75, 0xff, 0x93, 0xdb, 0x82, 0xea, 0x9a, 0x1a, 0xf6, 0x2b, 0xc2, 0xe7, 0xef, 0xbc,
|
||||
0x84, 0xf8, 0x60, 0xab, 0x4c, 0x7e, 0x8d, 0xf6, 0x46, 0x1b, 0xcc, 0x31, 0x99, 0x43, 0x5f, 0x9b,
|
||||
0x53, 0x6d, 0x1d, 0xbc, 0x30, 0x0e, 0xf6, 0xbc, 0xc7, 0x0b, 0x9a, 0x7c, 0x3c, 0xd5, 0x49, 0x9b,
|
||||
0xfa, 0x82, 0x30, 0xd1, 0xd7, 0xa8, 0xc7, 0x93, 0x6c, 0x7f, 0xfc, 0x54, 0x64, 0x8d, 0xab, 0xb5,
|
||||
0xbf, 0x5d, 0xb7, 0x69, 0x9d, 0xb3, 0x75, 0xb1, 0x55, 0xce, 0x6b, 0x8f, 0x19, 0x03, 0xcf, 0xbd,
|
||||
0x27, 0x0b, 0x32, 0x20, 0xa6, 0x11, 0x42, 0xe4, 0x77, 0x2b, 0xbc, 0x1c, 0xf3, 0xa2, 0x05, 0xa0,
|
||||
0x7b, 0xe1, 0x8f, 0x67, 0x47, 0xee, 0xe8, 0x73, 0xbd, 0x83, 0xf6, 0x42, 0x2b, 0x4e, 0x79, 0xce,
|
||||
0xca, 0x34, 0xe0, 0x22, 0xa5, 0x29, 0x94, 0xe6, 0xd4, 0xd3, 0x26, 0xc4, 0xaa, 0x4c, 0xfe, 0xfe,
|
||||
0x04, 0xde, 0x72, 0xeb, 0x77, 0x08, 0x0d, 0x4e, 0x1b, 0xe5, 0xcd, 0x9f, 0x01, 0x00, 0x00, 0xff,
|
||||
0xff, 0x4c, 0x27, 0x6e, 0x9a, 0xb0, 0x05, 0x00, 0x00,
|
||||
}
|
||||
|
494 vendor/google.golang.org/genproto/googleapis/bigtable/v1/bigtable_service_messages.pb.go generated vendored
@@ -3,20 +3,28 @@

package bigtable

import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_rpc "google.golang.org/genproto/googleapis/rpc/status"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
status "google.golang.org/genproto/googleapis/rpc/status"
math "math"
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// Request message for BigtableServer.ReadRows.
|
||||
type ReadRowsRequest struct {
|
||||
// The unique name of the table from which to read.
|
||||
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"`
|
||||
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
|
||||
// If neither row_key nor row_range is set, reads from all rows.
|
||||
//
|
||||
// Types that are valid to be assigned to Target:
|
||||
@@ -26,24 +34,54 @@ type ReadRowsRequest struct {
|
||||
Target isReadRowsRequest_Target `protobuf_oneof:"target"`
|
||||
// The filter to apply to the contents of the specified row(s). If unset,
|
||||
// reads the entire table.
|
||||
Filter *RowFilter `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"`
|
||||
Filter *RowFilter `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
|
||||
// By default, rows are read sequentially, producing results which are
|
||||
// guaranteed to arrive in increasing row order. Setting
|
||||
// "allow_row_interleaving" to true allows multiple rows to be interleaved in
|
||||
// the response stream, which increases throughput but breaks this guarantee,
|
||||
// and may force the client to use more memory to buffer partially-received
|
||||
// rows. Cannot be set to true when specifying "num_rows_limit".
|
||||
AllowRowInterleaving bool `protobuf:"varint,6,opt,name=allow_row_interleaving,json=allowRowInterleaving" json:"allow_row_interleaving,omitempty"`
|
||||
AllowRowInterleaving bool `protobuf:"varint,6,opt,name=allow_row_interleaving,json=allowRowInterleaving,proto3" json:"allow_row_interleaving,omitempty"`
|
||||
// The read will terminate after committing to N rows' worth of results. The
|
||||
// default (zero) is to return all results.
|
||||
// Note that "allow_row_interleaving" cannot be set to true when this is set.
|
||||
NumRowsLimit int64 `protobuf:"varint,7,opt,name=num_rows_limit,json=numRowsLimit" json:"num_rows_limit,omitempty"`
|
||||
NumRowsLimit int64 `protobuf:"varint,7,opt,name=num_rows_limit,json=numRowsLimit,proto3" json:"num_rows_limit,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ReadRowsRequest) Reset() { *m = ReadRowsRequest{} }
|
||||
func (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReadRowsRequest) ProtoMessage() {}
|
||||
func (*ReadRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
|
||||
func (m *ReadRowsRequest) Reset() { *m = ReadRowsRequest{} }
|
||||
func (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReadRowsRequest) ProtoMessage() {}
|
||||
func (*ReadRowsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_f3f31120fcc6bad7, []int{0}
|
||||
}
|
||||
|
||||
func (m *ReadRowsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ReadRowsRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ReadRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ReadRowsRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ReadRowsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ReadRowsRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ReadRowsRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ReadRowsRequest.Size(m)
|
||||
}
|
||||
func (m *ReadRowsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ReadRowsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ReadRowsRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ReadRowsRequest) GetTableName() string {
|
||||
if m != nil {
|
||||
return m.TableName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type isReadRowsRequest_Target interface {
|
||||
isReadRowsRequest_Target()
|
||||
@@ -52,16 +90,20 @@ type isReadRowsRequest_Target interface {
|
||||
type ReadRowsRequest_RowKey struct {
|
||||
RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ReadRowsRequest_RowRange struct {
|
||||
RowRange *RowRange `protobuf:"bytes,3,opt,name=row_range,json=rowRange,oneof"`
|
||||
}
|
||||
type ReadRowsRequest_RowSet struct {
|
||||
RowSet *RowSet `protobuf:"bytes,8,opt,name=row_set,json=rowSet,oneof"`
|
||||
RowRange *RowRange `protobuf:"bytes,3,opt,name=row_range,json=rowRange,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ReadRowsRequest_RowKey) isReadRowsRequest_Target() {}
|
||||
type ReadRowsRequest_RowSet struct {
|
||||
RowSet *RowSet `protobuf:"bytes,8,opt,name=row_set,json=rowSet,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ReadRowsRequest_RowKey) isReadRowsRequest_Target() {}
|
||||
|
||||
func (*ReadRowsRequest_RowRange) isReadRowsRequest_Target() {}
|
||||
func (*ReadRowsRequest_RowSet) isReadRowsRequest_Target() {}
|
||||
|
||||
func (*ReadRowsRequest_RowSet) isReadRowsRequest_Target() {}
|
||||
|
||||
func (m *ReadRowsRequest) GetTarget() isReadRowsRequest_Target {
|
||||
if m != nil {
|
||||
@@ -70,13 +112,6 @@ func (m *ReadRowsRequest) GetTarget() isReadRowsRequest_Target {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ReadRowsRequest) GetTableName() string {
|
||||
if m != nil {
|
||||
return m.TableName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ReadRowsRequest) GetRowKey() []byte {
|
||||
if x, ok := m.GetTarget().(*ReadRowsRequest_RowKey); ok {
|
||||
return x.RowKey
|
||||
@@ -188,17 +223,17 @@ func _ReadRowsRequest_OneofSizer(msg proto.Message) (n int) {
|
||||
// target
|
||||
switch x := m.Target.(type) {
|
||||
case *ReadRowsRequest_RowKey:
|
||||
n += proto.SizeVarint(2<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(len(x.RowKey)))
|
||||
n += len(x.RowKey)
|
||||
case *ReadRowsRequest_RowRange:
|
||||
s := proto.Size(x.RowRange)
|
||||
n += proto.SizeVarint(3<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *ReadRowsRequest_RowSet:
|
||||
s := proto.Size(x.RowSet)
|
||||
n += proto.SizeVarint(8<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case nil:
|
||||
@@ -215,13 +250,36 @@ type ReadRowsResponse struct {
|
||||
// "allow_row_interleaving" was specified in the request.
|
||||
RowKey []byte `protobuf:"bytes,1,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"`
|
||||
// One or more chunks of the row specified by "row_key".
|
||||
Chunks []*ReadRowsResponse_Chunk `protobuf:"bytes,2,rep,name=chunks" json:"chunks,omitempty"`
|
||||
Chunks []*ReadRowsResponse_Chunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ReadRowsResponse) Reset() { *m = ReadRowsResponse{} }
|
||||
func (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReadRowsResponse) ProtoMessage() {}
|
||||
func (*ReadRowsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} }
|
||||
func (m *ReadRowsResponse) Reset() { *m = ReadRowsResponse{} }
|
||||
func (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReadRowsResponse) ProtoMessage() {}
|
||||
func (*ReadRowsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_f3f31120fcc6bad7, []int{1}
|
||||
}
|
||||
|
||||
func (m *ReadRowsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ReadRowsResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ReadRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ReadRowsResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ReadRowsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ReadRowsResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ReadRowsResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ReadRowsResponse.Size(m)
|
||||
}
|
||||
func (m *ReadRowsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ReadRowsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ReadRowsResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ReadRowsResponse) GetRowKey() []byte {
|
||||
if m != nil {
|
||||
@@ -244,31 +302,58 @@ type ReadRowsResponse_Chunk struct {
|
||||
// *ReadRowsResponse_Chunk_RowContents
|
||||
// *ReadRowsResponse_Chunk_ResetRow
|
||||
// *ReadRowsResponse_Chunk_CommitRow
|
||||
Chunk isReadRowsResponse_Chunk_Chunk `protobuf_oneof:"chunk"`
|
||||
Chunk isReadRowsResponse_Chunk_Chunk `protobuf_oneof:"chunk"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ReadRowsResponse_Chunk) Reset() { *m = ReadRowsResponse_Chunk{} }
|
||||
func (m *ReadRowsResponse_Chunk) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReadRowsResponse_Chunk) ProtoMessage() {}
|
||||
func (*ReadRowsResponse_Chunk) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1, 0} }
|
||||
func (m *ReadRowsResponse_Chunk) Reset() { *m = ReadRowsResponse_Chunk{} }
|
||||
func (m *ReadRowsResponse_Chunk) String() string { return proto.CompactTextString(m) }
|
||||
func (*ReadRowsResponse_Chunk) ProtoMessage() {}
|
||||
func (*ReadRowsResponse_Chunk) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_f3f31120fcc6bad7, []int{1, 0}
|
||||
}
|
||||
|
||||
func (m *ReadRowsResponse_Chunk) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ReadRowsResponse_Chunk.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ReadRowsResponse_Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ReadRowsResponse_Chunk.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ReadRowsResponse_Chunk) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ReadRowsResponse_Chunk.Merge(m, src)
|
||||
}
|
||||
func (m *ReadRowsResponse_Chunk) XXX_Size() int {
|
||||
return xxx_messageInfo_ReadRowsResponse_Chunk.Size(m)
|
||||
}
|
||||
func (m *ReadRowsResponse_Chunk) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ReadRowsResponse_Chunk.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ReadRowsResponse_Chunk proto.InternalMessageInfo
|
||||
|
||||
type isReadRowsResponse_Chunk_Chunk interface {
|
||||
isReadRowsResponse_Chunk_Chunk()
|
||||
}
|
||||
|
||||
type ReadRowsResponse_Chunk_RowContents struct {
|
||||
RowContents *Family `protobuf:"bytes,1,opt,name=row_contents,json=rowContents,oneof"`
|
||||
RowContents *Family `protobuf:"bytes,1,opt,name=row_contents,json=rowContents,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ReadRowsResponse_Chunk_ResetRow struct {
|
||||
ResetRow bool `protobuf:"varint,2,opt,name=reset_row,json=resetRow,oneof"`
|
||||
ResetRow bool `protobuf:"varint,2,opt,name=reset_row,json=resetRow,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ReadRowsResponse_Chunk_CommitRow struct {
|
||||
CommitRow bool `protobuf:"varint,3,opt,name=commit_row,json=commitRow,oneof"`
|
||||
CommitRow bool `protobuf:"varint,3,opt,name=commit_row,json=commitRow,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ReadRowsResponse_Chunk_RowContents) isReadRowsResponse_Chunk_Chunk() {}
|
||||
func (*ReadRowsResponse_Chunk_ResetRow) isReadRowsResponse_Chunk_Chunk() {}
|
||||
func (*ReadRowsResponse_Chunk_CommitRow) isReadRowsResponse_Chunk_Chunk() {}
|
||||
|
||||
func (*ReadRowsResponse_Chunk_ResetRow) isReadRowsResponse_Chunk_Chunk() {}
|
||||
|
||||
func (*ReadRowsResponse_Chunk_CommitRow) isReadRowsResponse_Chunk_Chunk() {}
|
||||
|
||||
func (m *ReadRowsResponse_Chunk) GetChunk() isReadRowsResponse_Chunk_Chunk {
|
||||
if m != nil {
|
||||
@@ -373,14 +458,14 @@ func _ReadRowsResponse_Chunk_OneofSizer(msg proto.Message) (n int) {
|
||||
switch x := m.Chunk.(type) {
|
||||
case *ReadRowsResponse_Chunk_RowContents:
|
||||
s := proto.Size(x.RowContents)
|
||||
n += proto.SizeVarint(1<<3 | proto.WireBytes)
|
||||
n += 1 // tag and wire
|
||||
n += proto.SizeVarint(uint64(s))
|
||||
n += s
|
||||
case *ReadRowsResponse_Chunk_ResetRow:
|
||||
n += proto.SizeVarint(2<<3 | proto.WireVarint)
|
||||
n += 1 // tag and wire
|
||||
n += 1
|
||||
case *ReadRowsResponse_Chunk_CommitRow:
|
||||
n += proto.SizeVarint(3<<3 | proto.WireVarint)
|
||||
n += 1 // tag and wire
|
||||
n += 1
|
||||
case nil:
|
||||
default:
|
||||
@@ -392,13 +477,36 @@ func _ReadRowsResponse_Chunk_OneofSizer(msg proto.Message) (n int) {
|
||||
// Request message for BigtableService.SampleRowKeys.
|
||||
type SampleRowKeysRequest struct {
|
||||
// The unique name of the table from which to sample row keys.
|
||||
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"`
|
||||
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SampleRowKeysRequest) Reset() { *m = SampleRowKeysRequest{} }
|
||||
func (m *SampleRowKeysRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SampleRowKeysRequest) ProtoMessage() {}
|
||||
func (*SampleRowKeysRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} }
|
||||
func (m *SampleRowKeysRequest) Reset() { *m = SampleRowKeysRequest{} }
|
||||
func (m *SampleRowKeysRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SampleRowKeysRequest) ProtoMessage() {}
|
||||
func (*SampleRowKeysRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_f3f31120fcc6bad7, []int{2}
|
||||
}
|
||||
|
||||
func (m *SampleRowKeysRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SampleRowKeysRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SampleRowKeysRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SampleRowKeysRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SampleRowKeysRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SampleRowKeysRequest.Merge(m, src)
|
||||
}
|
||||
func (m *SampleRowKeysRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_SampleRowKeysRequest.Size(m)
|
||||
}
|
||||
func (m *SampleRowKeysRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SampleRowKeysRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SampleRowKeysRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *SampleRowKeysRequest) GetTableName() string {
|
||||
if m != nil {
|
||||
@@ -421,13 +529,36 @@ type SampleRowKeysResponse struct {
|
||||
// "row_key". Buffering the contents of all rows between two subsequent
|
||||
// samples would require space roughly equal to the difference in their
|
||||
// "offset_bytes" fields.
|
||||
OffsetBytes int64 `protobuf:"varint,2,opt,name=offset_bytes,json=offsetBytes" json:"offset_bytes,omitempty"`
|
||||
OffsetBytes int64 `protobuf:"varint,2,opt,name=offset_bytes,json=offsetBytes,proto3" json:"offset_bytes,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SampleRowKeysResponse) Reset() { *m = SampleRowKeysResponse{} }
|
||||
func (m *SampleRowKeysResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SampleRowKeysResponse) ProtoMessage() {}
|
||||
func (*SampleRowKeysResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} }
|
||||
func (m *SampleRowKeysResponse) Reset() { *m = SampleRowKeysResponse{} }
|
||||
func (m *SampleRowKeysResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SampleRowKeysResponse) ProtoMessage() {}
|
||||
func (*SampleRowKeysResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_f3f31120fcc6bad7, []int{3}
|
||||
}
|
||||
|
||||
func (m *SampleRowKeysResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SampleRowKeysResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SampleRowKeysResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SampleRowKeysResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SampleRowKeysResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SampleRowKeysResponse.Merge(m, src)
|
||||
}
|
||||
func (m *SampleRowKeysResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_SampleRowKeysResponse.Size(m)
|
||||
}
|
||||
func (m *SampleRowKeysResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SampleRowKeysResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SampleRowKeysResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *SampleRowKeysResponse) GetRowKey() []byte {
|
||||
if m != nil {
|
||||
@@ -446,19 +577,42 @@ func (m *SampleRowKeysResponse) GetOffsetBytes() int64 {
|
||||
// Request message for BigtableService.MutateRow.
|
||||
type MutateRowRequest struct {
|
||||
// The unique name of the table to which the mutation should be applied.
|
||||
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"`
|
||||
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
|
||||
// The key of the row to which the mutation should be applied.
|
||||
RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"`
|
||||
// Changes to be atomically applied to the specified row. Entries are applied
|
||||
// in order, meaning that earlier mutations can be masked by later ones.
|
||||
// Must contain at least one entry and at most 100000.
|
||||
Mutations []*Mutation `protobuf:"bytes,3,rep,name=mutations" json:"mutations,omitempty"`
|
||||
Mutations []*Mutation `protobuf:"bytes,3,rep,name=mutations,proto3" json:"mutations,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MutateRowRequest) Reset() { *m = MutateRowRequest{} }
|
||||
func (m *MutateRowRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*MutateRowRequest) ProtoMessage() {}
|
||||
func (*MutateRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} }
|
||||
func (m *MutateRowRequest) Reset() { *m = MutateRowRequest{} }
|
||||
func (m *MutateRowRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*MutateRowRequest) ProtoMessage() {}
|
||||
func (*MutateRowRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_f3f31120fcc6bad7, []int{4}
|
||||
}
|
||||
|
||||
func (m *MutateRowRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MutateRowRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MutateRowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MutateRowRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *MutateRowRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MutateRowRequest.Merge(m, src)
|
||||
}
|
||||
func (m *MutateRowRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_MutateRowRequest.Size(m)
|
||||
}
|
||||
func (m *MutateRowRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MutateRowRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MutateRowRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *MutateRowRequest) GetTableName() string {
|
||||
if m != nil {
|
||||
@@ -484,19 +638,42 @@ func (m *MutateRowRequest) GetMutations() []*Mutation {
|
||||
// Request message for BigtableService.MutateRows.
|
||||
type MutateRowsRequest struct {
|
||||
// The unique name of the table to which the mutations should be applied.
|
||||
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"`
|
||||
TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
|
||||
// The row keys/mutations to be applied in bulk.
|
||||
// Each entry is applied as an atomic mutation, but the entries may be
|
||||
// applied in arbitrary order (even between entries for the same row).
|
||||
// At least one entry must be specified, and in total the entries may
|
||||
// contain at most 100000 mutations.
|
||||
Entries []*MutateRowsRequest_Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"`
|
||||
Entries []*MutateRowsRequest_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MutateRowsRequest) Reset() { *m = MutateRowsRequest{} }
|
||||
func (m *MutateRowsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*MutateRowsRequest) ProtoMessage() {}
|
||||
func (*MutateRowsRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} }
|
||||
func (m *MutateRowsRequest) Reset() { *m = MutateRowsRequest{} }
|
||||
func (m *MutateRowsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*MutateRowsRequest) ProtoMessage() {}
|
||||
func (*MutateRowsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_f3f31120fcc6bad7, []int{5}
|
||||
}
|
||||
|
||||
func (m *MutateRowsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MutateRowsRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MutateRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MutateRowsRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *MutateRowsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MutateRowsRequest.Merge(m, src)
|
||||
}
|
||||
func (m *MutateRowsRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_MutateRowsRequest.Size(m)
|
||||
}
|
||||
func (m *MutateRowsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MutateRowsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MutateRowsRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *MutateRowsRequest) GetTableName() string {
|
||||
if m != nil {
|
||||
@@ -519,13 +696,36 @@ type MutateRowsRequest_Entry struct {
	// applied in order, meaning that earlier mutations can be masked by
	// later ones.
	// At least one mutation must be specified.
	Mutations []*Mutation `protobuf:"bytes,2,rep,name=mutations" json:"mutations,omitempty"`
	Mutations []*Mutation `protobuf:"bytes,2,rep,name=mutations,proto3" json:"mutations,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *MutateRowsRequest_Entry) Reset() { *m = MutateRowsRequest_Entry{} }
func (m *MutateRowsRequest_Entry) String() string { return proto.CompactTextString(m) }
func (*MutateRowsRequest_Entry) ProtoMessage() {}
func (*MutateRowsRequest_Entry) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5, 0} }
func (m *MutateRowsRequest_Entry) Reset() { *m = MutateRowsRequest_Entry{} }
func (m *MutateRowsRequest_Entry) String() string { return proto.CompactTextString(m) }
func (*MutateRowsRequest_Entry) ProtoMessage() {}
func (*MutateRowsRequest_Entry) Descriptor() ([]byte, []int) {
	return fileDescriptor_f3f31120fcc6bad7, []int{5, 0}
}

func (m *MutateRowsRequest_Entry) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_MutateRowsRequest_Entry.Unmarshal(m, b)
}
func (m *MutateRowsRequest_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_MutateRowsRequest_Entry.Marshal(b, m, deterministic)
}
func (m *MutateRowsRequest_Entry) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MutateRowsRequest_Entry.Merge(m, src)
}
func (m *MutateRowsRequest_Entry) XXX_Size() int {
	return xxx_messageInfo_MutateRowsRequest_Entry.Size(m)
}
func (m *MutateRowsRequest_Entry) XXX_DiscardUnknown() {
	xxx_messageInfo_MutateRowsRequest_Entry.DiscardUnknown(m)
}

var xxx_messageInfo_MutateRowsRequest_Entry proto.InternalMessageInfo

func (m *MutateRowsRequest_Entry) GetRowKey() []byte {
	if m != nil {
@@ -548,15 +748,38 @@ type MutateRowsResponse struct {
	// Depending on how requests are batched during execution, it is possible
	// for one Entry to fail due to an error with another Entry. In the event
	// that this occurs, the same error will be reported for both entries.
	Statuses []*google_rpc.Status `protobuf:"bytes,1,rep,name=statuses" json:"statuses,omitempty"`
	Statuses []*status.Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *MutateRowsResponse) Reset() { *m = MutateRowsResponse{} }
func (m *MutateRowsResponse) String() string { return proto.CompactTextString(m) }
func (*MutateRowsResponse) ProtoMessage() {}
func (*MutateRowsResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} }
func (m *MutateRowsResponse) Reset() { *m = MutateRowsResponse{} }
func (m *MutateRowsResponse) String() string { return proto.CompactTextString(m) }
func (*MutateRowsResponse) ProtoMessage() {}
func (*MutateRowsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_f3f31120fcc6bad7, []int{6}
}

func (m *MutateRowsResponse) GetStatuses() []*google_rpc.Status {
func (m *MutateRowsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_MutateRowsResponse.Unmarshal(m, b)
}
func (m *MutateRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_MutateRowsResponse.Marshal(b, m, deterministic)
}
func (m *MutateRowsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_MutateRowsResponse.Merge(m, src)
}
func (m *MutateRowsResponse) XXX_Size() int {
	return xxx_messageInfo_MutateRowsResponse.Size(m)
}
func (m *MutateRowsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_MutateRowsResponse.DiscardUnknown(m)
}

var xxx_messageInfo_MutateRowsResponse proto.InternalMessageInfo

func (m *MutateRowsResponse) GetStatuses() []*status.Status {
	if m != nil {
		return m.Statuses
	}
@@ -567,32 +790,55 @@ func (m *MutateRowsResponse) GetStatuses() []*google_rpc.Status {
type CheckAndMutateRowRequest struct {
	// The unique name of the table to which the conditional mutation should be
	// applied.
	TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"`
	TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
	// The key of the row to which the conditional mutation should be applied.
	RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"`
	// The filter to be applied to the contents of the specified row. Depending
	// on whether or not any results are yielded, either "true_mutations" or
	// "false_mutations" will be executed. If unset, checks that the row contains
	// any values at all.
	PredicateFilter *RowFilter `protobuf:"bytes,6,opt,name=predicate_filter,json=predicateFilter" json:"predicate_filter,omitempty"`
	PredicateFilter *RowFilter `protobuf:"bytes,6,opt,name=predicate_filter,json=predicateFilter,proto3" json:"predicate_filter,omitempty"`
	// Changes to be atomically applied to the specified row if "predicate_filter"
	// yields at least one cell when applied to "row_key". Entries are applied in
	// order, meaning that earlier mutations can be masked by later ones.
	// Must contain at least one entry if "false_mutations" is empty, and at most
	// 100000.
	TrueMutations []*Mutation `protobuf:"bytes,4,rep,name=true_mutations,json=trueMutations" json:"true_mutations,omitempty"`
	TrueMutations []*Mutation `protobuf:"bytes,4,rep,name=true_mutations,json=trueMutations,proto3" json:"true_mutations,omitempty"`
	// Changes to be atomically applied to the specified row if "predicate_filter"
	// does not yield any cells when applied to "row_key". Entries are applied in
	// order, meaning that earlier mutations can be masked by later ones.
	// Must contain at least one entry if "true_mutations" is empty, and at most
	// 100000.
	FalseMutations []*Mutation `protobuf:"bytes,5,rep,name=false_mutations,json=falseMutations" json:"false_mutations,omitempty"`
	FalseMutations []*Mutation `protobuf:"bytes,5,rep,name=false_mutations,json=falseMutations,proto3" json:"false_mutations,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CheckAndMutateRowRequest) Reset() { *m = CheckAndMutateRowRequest{} }
func (m *CheckAndMutateRowRequest) String() string { return proto.CompactTextString(m) }
func (*CheckAndMutateRowRequest) ProtoMessage() {}
func (*CheckAndMutateRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} }
func (m *CheckAndMutateRowRequest) Reset() { *m = CheckAndMutateRowRequest{} }
func (m *CheckAndMutateRowRequest) String() string { return proto.CompactTextString(m) }
func (*CheckAndMutateRowRequest) ProtoMessage() {}
func (*CheckAndMutateRowRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_f3f31120fcc6bad7, []int{7}
}

func (m *CheckAndMutateRowRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CheckAndMutateRowRequest.Unmarshal(m, b)
}
func (m *CheckAndMutateRowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CheckAndMutateRowRequest.Marshal(b, m, deterministic)
}
func (m *CheckAndMutateRowRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckAndMutateRowRequest.Merge(m, src)
}
func (m *CheckAndMutateRowRequest) XXX_Size() int {
	return xxx_messageInfo_CheckAndMutateRowRequest.Size(m)
}
func (m *CheckAndMutateRowRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckAndMutateRowRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CheckAndMutateRowRequest proto.InternalMessageInfo

func (m *CheckAndMutateRowRequest) GetTableName() string {
	if m != nil {
@@ -633,13 +879,36 @@ func (m *CheckAndMutateRowRequest) GetFalseMutations() []*Mutation {
type CheckAndMutateRowResponse struct {
	// Whether or not the request's "predicate_filter" yielded any results for
	// the specified row.
	PredicateMatched bool `protobuf:"varint,1,opt,name=predicate_matched,json=predicateMatched" json:"predicate_matched,omitempty"`
	PredicateMatched bool `protobuf:"varint,1,opt,name=predicate_matched,json=predicateMatched,proto3" json:"predicate_matched,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *CheckAndMutateRowResponse) Reset() { *m = CheckAndMutateRowResponse{} }
func (m *CheckAndMutateRowResponse) String() string { return proto.CompactTextString(m) }
func (*CheckAndMutateRowResponse) ProtoMessage() {}
func (*CheckAndMutateRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} }
func (m *CheckAndMutateRowResponse) Reset() { *m = CheckAndMutateRowResponse{} }
func (m *CheckAndMutateRowResponse) String() string { return proto.CompactTextString(m) }
func (*CheckAndMutateRowResponse) ProtoMessage() {}
func (*CheckAndMutateRowResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_f3f31120fcc6bad7, []int{8}
}

func (m *CheckAndMutateRowResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CheckAndMutateRowResponse.Unmarshal(m, b)
}
func (m *CheckAndMutateRowResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CheckAndMutateRowResponse.Marshal(b, m, deterministic)
}
func (m *CheckAndMutateRowResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CheckAndMutateRowResponse.Merge(m, src)
}
func (m *CheckAndMutateRowResponse) XXX_Size() int {
	return xxx_messageInfo_CheckAndMutateRowResponse.Size(m)
}
func (m *CheckAndMutateRowResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_CheckAndMutateRowResponse.DiscardUnknown(m)
}

var xxx_messageInfo_CheckAndMutateRowResponse proto.InternalMessageInfo

func (m *CheckAndMutateRowResponse) GetPredicateMatched() bool {
	if m != nil {
@@ -652,19 +921,42 @@ func (m *CheckAndMutateRowResponse) GetPredicateMatched() bool {
type ReadModifyWriteRowRequest struct {
	// The unique name of the table to which the read/modify/write rules should be
	// applied.
	TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName" json:"table_name,omitempty"`
	TableName string `protobuf:"bytes,1,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"`
	// The key of the row to which the read/modify/write rules should be applied.
	RowKey []byte `protobuf:"bytes,2,opt,name=row_key,json=rowKey,proto3" json:"row_key,omitempty"`
	// Rules specifying how the specified row's contents are to be transformed
	// into writes. Entries are applied in order, meaning that earlier rules will
	// affect the results of later ones.
	Rules []*ReadModifyWriteRule `protobuf:"bytes,3,rep,name=rules" json:"rules,omitempty"`
	Rules []*ReadModifyWriteRule `protobuf:"bytes,3,rep,name=rules,proto3" json:"rules,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized []byte `json:"-"`
	XXX_sizecache int32 `json:"-"`
}

func (m *ReadModifyWriteRowRequest) Reset() { *m = ReadModifyWriteRowRequest{} }
func (m *ReadModifyWriteRowRequest) String() string { return proto.CompactTextString(m) }
func (*ReadModifyWriteRowRequest) ProtoMessage() {}
func (*ReadModifyWriteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} }
func (m *ReadModifyWriteRowRequest) Reset() { *m = ReadModifyWriteRowRequest{} }
func (m *ReadModifyWriteRowRequest) String() string { return proto.CompactTextString(m) }
func (*ReadModifyWriteRowRequest) ProtoMessage() {}
func (*ReadModifyWriteRowRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_f3f31120fcc6bad7, []int{9}
}

func (m *ReadModifyWriteRowRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadModifyWriteRowRequest.Unmarshal(m, b)
}
func (m *ReadModifyWriteRowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadModifyWriteRowRequest.Marshal(b, m, deterministic)
}
func (m *ReadModifyWriteRowRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadModifyWriteRowRequest.Merge(m, src)
}
func (m *ReadModifyWriteRowRequest) XXX_Size() int {
	return xxx_messageInfo_ReadModifyWriteRowRequest.Size(m)
}
func (m *ReadModifyWriteRowRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadModifyWriteRowRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ReadModifyWriteRowRequest proto.InternalMessageInfo

func (m *ReadModifyWriteRowRequest) GetTableName() string {
	if m != nil {
@@ -702,9 +994,11 @@ func init() {
	proto.RegisterType((*ReadModifyWriteRowRequest)(nil), "google.bigtable.v1.ReadModifyWriteRowRequest")
}

func init() { proto.RegisterFile("google/bigtable/v1/bigtable_service_messages.proto", fileDescriptor2) }
func init() {
	proto.RegisterFile("google/bigtable/v1/bigtable_service_messages.proto", fileDescriptor_f3f31120fcc6bad7)
}

var fileDescriptor2 = []byte{
var fileDescriptor_f3f31120fcc6bad7 = []byte{
	// 788 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x5f, 0x8b, 0x23, 0x45,
	0x10, 0xdf, 0x49, 0xcc, 0xbf, 0x4a, 0xdc, 0xdd, 0x6b, 0xce, 0xdb, 0xd9, 0x70, 0x8b, 0x71, 0x10,
843	vendor/google.golang.org/genproto/googleapis/bigtable/v2/bigtable.pb.go (generated, vendored)
File diff suppressed because it is too large
1083	vendor/google.golang.org/genproto/googleapis/bigtable/v2/data.pb.go (generated, vendored)
File diff suppressed because it is too large