Firebase Update
express-server/node_modules/google-proto-files/google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto (generated, vendored, normal file, +94)
@@ -0,0 +1,94 @@
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.admin.cluster.v1;

import "google/api/annotations.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/timestamp.proto";

option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster";
option java_multiple_files = true;
option java_outer_classname = "BigtableClusterDataProto";
option java_package = "com.google.bigtable.admin.cluster.v1";


// A physical location in which a particular project can allocate Cloud BigTable
// resources.
message Zone {
  // Possible states of a zone.
  enum Status {
    // The state of the zone is unknown or unspecified.
    UNKNOWN = 0;

    // The zone is in a good state.
    OK = 1;

    // The zone is down for planned maintenance.
    PLANNED_MAINTENANCE = 2;

    // The zone is down for emergency or unplanned maintenance.
    EMERGENCY_MAINENANCE = 3;
  }

  // A permanent unique identifier for the zone.
  // Values are of the form projects/<project>/zones/[a-z][-a-z0-9]*
  string name = 1;

  // The name of this zone as it appears in UIs.
  string display_name = 2;

  // The current state of this zone.
  Status status = 3;
}

// An isolated set of Cloud BigTable resources on which tables can be hosted.
message Cluster {
  // A permanent unique identifier for the cluster. For technical reasons, the
  // zone in which the cluster resides is included here.
  // Values are of the form
  // projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]*
  string name = 1;

  // The operation currently running on the cluster, if any.
  // This cannot be set directly, only through CreateCluster, UpdateCluster,
  // or UndeleteCluster. Calls to these methods will be rejected if
  // "current_operation" is already set.
  google.longrunning.Operation current_operation = 3;

  // The descriptive name for this cluster as it appears in UIs.
  // Must be unique per zone.
  string display_name = 4;

  // The number of serve nodes allocated to this cluster.
  int32 serve_nodes = 5;

  // What storage type to use for tables in this cluster. Only configurable at
  // cluster creation time. If unspecified, STORAGE_SSD will be used.
  StorageType default_storage_type = 8;
}

enum StorageType {
  // The storage type used is unspecified.
  STORAGE_UNSPECIFIED = 0;

  // Data will be stored in SSD, providing low and consistent latencies.
  STORAGE_SSD = 1;

  // Data will be stored in HDD, providing high and less predictable
  // latencies.
  STORAGE_HDD = 2;
}
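For reference, a minimal TypeScript sketch of the shapes the v1 messages above describe. This is illustrative only, not generated code and not part of the vendored file; the example identifiers are hypothetical.

```ts
// Mirrors the proto3 messages above (current_operation omitted for brevity).
enum StorageType { STORAGE_UNSPECIFIED = 0, STORAGE_SSD = 1, STORAGE_HDD = 2 }

interface Zone {
  name: string;         // projects/<project>/zones/<zone>
  display_name: string;
  status: number;       // Zone.Status enum value
}

interface Cluster {
  name: string;         // projects/<project>/zones/<zone>/clusters/<cluster>
  display_name: string;
  serve_nodes: number;
  default_storage_type: StorageType;
}

// Hypothetical value shaped like the Cluster message.
const exampleCluster: Cluster = {
  name: 'projects/my-project/zones/us-central1-b/clusters/my-cluster',
  display_name: 'my-cluster',
  serve_nodes: 3,
  default_storage_type: StorageType.STORAGE_SSD,
};
```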
express-server/node_modules/google-proto-files/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto (generated, vendored, normal file, +130)
@@ -0,0 +1,130 @@
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.admin.cluster.v1;

import "google/api/annotations.proto";
import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto";
import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/empty.proto";

option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster";
option java_multiple_files = true;
option java_outer_classname = "BigtableClusterServicesProto";
option java_package = "com.google.bigtable.admin.cluster.v1";


// Service for managing zonal Cloud Bigtable resources.
service BigtableClusterService {
  // Lists the supported zones for the given project.
  rpc ListZones(ListZonesRequest) returns (ListZonesResponse) {
    option (google.api.http) = { get: "/v1/{name=projects/*}/zones" };
  }

  // Gets information about a particular cluster.
  rpc GetCluster(GetClusterRequest) returns (Cluster) {
    option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" };
  }

  // Lists all clusters in the given project, along with any zones for which
  // cluster information could not be retrieved.
  rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
    option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" };
  }

  // Creates a cluster and begins preparing it to begin serving. The returned
  // cluster embeds as its "current_operation" a long-running operation which
  // can be used to track the progress of turning up the new cluster.
  // Immediately upon completion of this request:
  //  * The cluster will be readable via the API, with all requested attributes
  //    but no allocated resources.
  // Until completion of the embedded operation:
  //  * Cancelling the operation will render the cluster immediately unreadable
  //    via the API.
  //  * All other attempts to modify or delete the cluster will be rejected.
  // Upon completion of the embedded operation:
  //  * Billing for all successfully-allocated resources will begin (some types
  //    may have lower than the requested levels).
  //  * New tables can be created in the cluster.
  //  * The cluster's allocated resource levels will be readable via the API.
  // The embedded operation's "metadata" field type is
  // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is
  // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
  rpc CreateCluster(CreateClusterRequest) returns (Cluster) {
    option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" };
  }

  // Updates a cluster, and begins allocating or releasing resources as
  // requested. The returned cluster embeds as its "current_operation" a
  // long-running operation which can be used to track the progress of updating
  // the cluster.
  // Immediately upon completion of this request:
  //  * For resource types where a decrease in the cluster's allocation has been
  //    requested, billing will be based on the newly-requested level.
  // Until completion of the embedded operation:
  //  * Cancelling the operation will set its metadata's "cancelled_at_time",
  //    and begin restoring resources to their pre-request values. The operation
  //    is guaranteed to succeed at undoing all resource changes, after which
  //    point it will terminate with a CANCELLED status.
  //  * All other attempts to modify or delete the cluster will be rejected.
  //  * Reading the cluster via the API will continue to give the pre-request
  //    resource levels.
  // Upon completion of the embedded operation:
  //  * Billing will begin for all successfully-allocated resources (some types
  //    may have lower than the requested levels).
  //  * All newly-reserved resources will be available for serving the cluster's
  //    tables.
  //  * The cluster's new resource levels will be readable via the API.
  // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is
  // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
  rpc UpdateCluster(Cluster) returns (Cluster) {
    option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" };
  }

  // Marks a cluster and all of its tables for permanent deletion in 7 days.
  // Immediately upon completion of the request:
  //  * Billing will cease for all of the cluster's reserved resources.
  //  * The cluster's "delete_time" field will be set 7 days in the future.
  // Soon afterward:
  //  * All tables within the cluster will become unavailable.
  // Prior to the cluster's "delete_time":
  //  * The cluster can be recovered with a call to UndeleteCluster.
  //  * All other attempts to modify or delete the cluster will be rejected.
  // At the cluster's "delete_time":
  //  * The cluster and *all of its tables* will immediately and irrevocably
  //    disappear from the API, and their data will be permanently deleted.
  rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" };
  }

  // Cancels the scheduled deletion of an cluster and begins preparing it to
  // resume serving. The returned operation will also be embedded as the
  // cluster's "current_operation".
  // Immediately upon completion of this request:
  //  * The cluster's "delete_time" field will be unset, protecting it from
  //    automatic deletion.
  // Until completion of the returned operation:
  //  * The operation cannot be cancelled.
  // Upon completion of the returned operation:
  //  * Billing for the cluster's resources will resume.
  //  * All tables within the cluster will be available.
  // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is
  // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful.
  rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "" };
  }
}
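Each rpc above carries a `google.api.http` option that binds request fields into a REST path. As a rough illustration (the helper below is hypothetical, not part of any Google library), the ListZones rule `get: "/v1/{name=projects/*}/zones"` expands like this:

```ts
// Hypothetical helper: expand a google.api.http path template such as
// "/v1/{name=projects/*}/zones" using fields from the request message.
// It substitutes each {field=pattern} segment without validating the pattern.
function expandHttpTemplate(template: string, request: Record<string, string>): string {
  return template.replace(/\{(\w+)=[^}]*\}/g, (_, field) => request[field]);
}

// ListZonesRequest.name -> GET /v1/projects/my-project/zones (project is hypothetical)
const url = expandHttpTemplate('/v1/{name=projects/*}/zones', { name: 'projects/my-project' });
console.log(url); // "/v1/projects/my-project/zones"
```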
express-server/node_modules/google-proto-files/google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto (generated, vendored, normal file, +141)
@@ -0,0 +1,141 @@
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.admin.cluster.v1;

import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto";
import "google/protobuf/timestamp.proto";

option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/cluster/v1;cluster";
option java_multiple_files = true;
option java_outer_classname = "BigtableClusterServiceMessagesProto";
option java_package = "com.google.bigtable.admin.cluster.v1";


// Request message for BigtableClusterService.ListZones.
message ListZonesRequest {
  // The unique name of the project for which a list of supported zones is
  // requested.
  // Values are of the form projects/<project>
  string name = 1;
}

// Response message for BigtableClusterService.ListZones.
message ListZonesResponse {
  // The list of requested zones.
  repeated Zone zones = 1;
}

// Request message for BigtableClusterService.GetCluster.
message GetClusterRequest {
  // The unique name of the requested cluster.
  // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
  string name = 1;
}

// Request message for BigtableClusterService.ListClusters.
message ListClustersRequest {
  // The unique name of the project for which a list of clusters is requested.
  // Values are of the form projects/<project>
  string name = 1;
}

// Response message for BigtableClusterService.ListClusters.
message ListClustersResponse {
  // The list of requested Clusters.
  repeated Cluster clusters = 1;

  // The zones for which clusters could not be retrieved.
  repeated Zone failed_zones = 2;
}

// Request message for BigtableClusterService.CreateCluster.
message CreateClusterRequest {
  // The unique name of the zone in which to create the cluster.
  // Values are of the form projects/<project>/zones/<zone>
  string name = 1;

  // The id to be used when referring to the new cluster within its zone,
  // e.g. just the "test-cluster" section of the full name
  // "projects/<project>/zones/<zone>/clusters/test-cluster".
  string cluster_id = 2;

  // The cluster to create.
  // The "name", "delete_time", and "current_operation" fields must be left
  // blank.
  Cluster cluster = 3;
}

// Metadata type for the operation returned by
// BigtableClusterService.CreateCluster.
message CreateClusterMetadata {
  // The request which prompted the creation of this operation.
  CreateClusterRequest original_request = 1;

  // The time at which original_request was received.
  google.protobuf.Timestamp request_time = 2;

  // The time at which this operation failed or was completed successfully.
  google.protobuf.Timestamp finish_time = 3;
}

// Metadata type for the operation returned by
// BigtableClusterService.UpdateCluster.
message UpdateClusterMetadata {
  // The request which prompted the creation of this operation.
  Cluster original_request = 1;

  // The time at which original_request was received.
  google.protobuf.Timestamp request_time = 2;

  // The time at which this operation was cancelled. If set, this operation is
  // in the process of undoing itself (which is guaranteed to succeed) and
  // cannot be cancelled again.
  google.protobuf.Timestamp cancel_time = 3;

  // The time at which this operation failed or was completed successfully.
  google.protobuf.Timestamp finish_time = 4;
}

// Request message for BigtableClusterService.DeleteCluster.
message DeleteClusterRequest {
  // The unique name of the cluster to be deleted.
  // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
  string name = 1;
}

// Request message for BigtableClusterService.UndeleteCluster.
message UndeleteClusterRequest {
  // The unique name of the cluster to be un-deleted.
  // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster>
  string name = 1;
}

// Metadata type for the operation returned by
// BigtableClusterService.UndeleteCluster.
message UndeleteClusterMetadata {
  // The time at which the original request was received.
  google.protobuf.Timestamp request_time = 1;

  // The time at which this operation failed or was completed successfully.
  google.protobuf.Timestamp finish_time = 2;
}

// Metadata type for operations initiated by the V2 BigtableAdmin service.
// More complete information for such operations is available via the V2 API.
message V2OperationMetadata {

}
express-server/node_modules/google-proto-files/google/bigtable/admin/table/v1/bigtable_table_data.proto (generated, vendored, normal file, +126)
@@ -0,0 +1,126 @@
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.admin.table.v1;

import "google/longrunning/operations.proto";
import "google/protobuf/duration.proto";

option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableDataProto";
option java_package = "com.google.bigtable.admin.table.v1";


// A collection of user data indexed by row, column, and timestamp.
// Each table is served using the resources of its parent cluster.
message Table {
  enum TimestampGranularity {
    MILLIS = 0;
  }

  // A unique identifier of the form
  // <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*
  string name = 1;

  // If this Table is in the process of being created, the Operation used to
  // track its progress. As long as this operation is present, the Table will
  // not accept any Table Admin or Read/Write requests.
  google.longrunning.Operation current_operation = 2;

  // The column families configured for this table, mapped by column family id.
  map<string, ColumnFamily> column_families = 3;

  // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in
  // this table. Timestamps not matching the granularity will be rejected.
  // Cannot be changed once the table is created.
  TimestampGranularity granularity = 4;
}

// A set of columns within a table which share a common configuration.
message ColumnFamily {
  // A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+
  // The last segment is the same as the "name" field in
  // google.bigtable.v1.Family.
  string name = 1;

  // Garbage collection expression specified by the following grammar:
  //   GC = EXPR
  //      | "" ;
  //   EXPR = EXPR, "||", EXPR              (* lowest precedence *)
  //        | EXPR, "&&", EXPR
  //        | "(", EXPR, ")"                (* highest precedence *)
  //        | PROP ;
  //   PROP = "version() >", NUM32
  //        | "age() >", NUM64, [ UNIT ] ;
  //   NUM32 = non-zero-digit { digit } ;   (* # NUM32 <= 2^32 - 1 *)
  //   NUM64 = non-zero-digit { digit } ;   (* # NUM64 <= 2^63 - 1 *)
  //   UNIT = "d" | "h" | "m"               (* d=days, h=hours, m=minutes, else micros *)
  // GC expressions can be up to 500 characters in length
  //
  // The different types of PROP are defined as follows:
  //   version() - cell index, counting from most recent and starting at 1
  //   age() - age of the cell (current time minus cell timestamp)
  //
  // Example: "version() > 3 || (age() > 3d && version() > 1)"
  //   drop cells beyond the most recent three, and drop cells older than three
  //   days unless they're the most recent cell in the row/column
  //
  // Garbage collection executes opportunistically in the background, and so
  // it's possible for reads to return a cell even if it matches the active GC
  // expression for its family.
  string gc_expression = 2;

  // Garbage collection rule specified as a protobuf.
  // Supersedes `gc_expression`.
  // Must serialize to at most 500 bytes.
  //
  // NOTE: Garbage collection executes opportunistically in the background, and
  // so it's possible for reads to return a cell even if it matches the active
  // GC expression for its family.
  GcRule gc_rule = 3;
}

// Rule for determining which cells to delete during garbage collection.
message GcRule {
  // A GcRule which deletes cells matching all of the given rules.
  message Intersection {
    // Only delete cells which would be deleted by every element of `rules`.
    repeated GcRule rules = 1;
  }

  // A GcRule which deletes cells matching any of the given rules.
  message Union {
    // Delete cells which would be deleted by any element of `rules`.
    repeated GcRule rules = 1;
  }

  oneof rule {
    // Delete all cells in a column except the most recent N.
    int32 max_num_versions = 1;

    // Delete cells in a column older than the given age.
    // Values must be at least one millisecond, and will be truncated to
    // microsecond granularity.
    google.protobuf.Duration max_age = 2;

    // Delete cells that would be deleted by every nested rule.
    Intersection intersection = 3;

    // Delete cells that would be deleted by any nested rule.
    Union union = 4;
  }
}
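The `gc_expression` example above, `"version() > 3 || (age() > 3d && version() > 1)"`, can equivalently be written with the structured `GcRule` message (union = `||`, intersection = `&&`). A sketch of that value as a plain object, field names following the proto; the object itself is illustrative:

```ts
// Structured GcRule equivalent of "version() > 3 || (age() > 3d && version() > 1)".
const gcRule = {
  union: {
    rules: [
      // version() > 3 -> delete everything beyond the 3 most recent cells
      { max_num_versions: 3 },
      // age() > 3d && version() > 1 -> delete cells older than 3 days,
      // but always keep the most recent cell in the column
      {
        intersection: {
          rules: [
            { max_age: { seconds: 3 * 24 * 60 * 60 } }, // 3 days as a Duration
            { max_num_versions: 1 },
          ],
        },
      },
    ],
  },
};
```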
express-server/node_modules/google-proto-files/google/bigtable/admin/table/v1/bigtable_table_service.proto (generated, vendored, normal file, +80)
@@ -0,0 +1,80 @@
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.admin.table.v1;

import "google/api/annotations.proto";
import "google/bigtable/admin/table/v1/bigtable_table_data.proto";
import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto";
import "google/protobuf/empty.proto";

option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableServicesProto";
option java_package = "com.google.bigtable.admin.table.v1";


// Service for creating, configuring, and deleting Cloud Bigtable tables.
// Provides access to the table schemas only, not the data stored within the tables.
service BigtableTableService {
  // Creates a new table, to be served from a specified cluster.
  // The table can be created with a full set of initial column families,
  // specified in the request.
  rpc CreateTable(CreateTableRequest) returns (Table) {
    option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" };
  }

  // Lists the names of all tables served from a specified cluster.
  rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {
    option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" };
  }

  // Gets the schema of the specified table, including its column families.
  rpc GetTable(GetTableRequest) returns (Table) {
    option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
  }

  // Permanently deletes a specified table and all of its data.
  rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" };
  }

  // Changes the name of a specified table.
  // Cannot be used to move tables between clusters, zones, or projects.
  rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" };
  }

  // Creates a new column family within a specified table.
  rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) {
    option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" };
  }

  // Changes the configuration of a specified column family.
  rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) {
    option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" };
  }

  // Permanently deletes a specified column family and all of its data.
  rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" };
  }

  // Delete all rows in a table corresponding to a particular prefix
  rpc BulkDeleteRows(BulkDeleteRowsRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:bulkDeleteRows" body: "*" };
  }
}
express-server/node_modules/google-proto-files/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto (generated, vendored, normal file, +116)
@@ -0,0 +1,116 @@
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.admin.table.v1;

import "google/bigtable/admin/table/v1/bigtable_table_data.proto";

option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/table/v1;table";
option java_multiple_files = true;
option java_outer_classname = "BigtableTableServiceMessagesProto";
option java_package = "com.google.bigtable.admin.table.v1";


message CreateTableRequest {
  // The unique name of the cluster in which to create the new table.
  string name = 1;

  // The name by which the new table should be referred to within the cluster,
  // e.g. "foobar" rather than "<cluster_name>/tables/foobar".
  string table_id = 2;

  // The Table to create. The `name` field of the Table and all of its
  // ColumnFamilies must be left blank, and will be populated in the response.
  Table table = 3;

  // The optional list of row keys that will be used to initially split the
  // table into several tablets (Tablets are similar to HBase regions).
  // Given two split keys, "s1" and "s2", three tablets will be created,
  // spanning the key ranges: [, s1), [s1, s2), [s2, ).
  //
  // Example:
  //  * Row keys := ["a", "apple", "custom", "customer_1", "customer_2",
  //                 "other", "zz"]
  //  * initial_split_keys := ["apple", "customer_1", "customer_2", "other"]
  //  * Key assignment:
  //    - Tablet 1 [, apple)                => {"a"}.
  //    - Tablet 2 [apple, customer_1)      => {"apple", "custom"}.
  //    - Tablet 3 [customer_1, customer_2) => {"customer_1"}.
  //    - Tablet 4 [customer_2, other)      => {"customer_2"}.
  //    - Tablet 5 [other, )                => {"other", "zz"}.
  repeated string initial_split_keys = 4;
}

message ListTablesRequest {
  // The unique name of the cluster for which tables should be listed.
  string name = 1;
}

message ListTablesResponse {
  // The tables present in the requested cluster.
  // At present, only the names of the tables are populated.
  repeated Table tables = 1;
}

message GetTableRequest {
  // The unique name of the requested table.
  string name = 1;
}

message DeleteTableRequest {
  // The unique name of the table to be deleted.
  string name = 1;
}

message RenameTableRequest {
  // The current unique name of the table.
  string name = 1;

  // The new name by which the table should be referred to within its containing
  // cluster, e.g. "foobar" rather than "<cluster_name>/tables/foobar".
  string new_id = 2;
}

message CreateColumnFamilyRequest {
  // The unique name of the table in which to create the new column family.
  string name = 1;

  // The name by which the new column family should be referred to within the
  // table, e.g. "foobar" rather than "<table_name>/columnFamilies/foobar".
  string column_family_id = 2;

  // The column family to create. The `name` field must be left blank.
  ColumnFamily column_family = 3;
}

message DeleteColumnFamilyRequest {
  // The unique name of the column family to be deleted.
  string name = 1;
}

message BulkDeleteRowsRequest {
  // The unique name of the table on which to perform the bulk delete
  string table_name = 1;

  oneof target {
    // Delete all rows that start with this row key prefix. Prefix cannot be
    // zero length.
    bytes row_key_prefix = 2;

    // Delete all rows in the table. Setting this to false is a no-op.
    bool delete_all_data_from_table = 3;
  }
}
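The `initial_split_keys` comment above describes how split keys partition the row space into tablets. A small, purely illustrative sketch that reproduces that key assignment:

```ts
// Assign each row key to the tablet key range implied by initial_split_keys,
// reproducing the worked example in the CreateTableRequest comment above.
// Tablet i covers [splitKeys[i-2], splitKeys[i-1]); the first and last
// tablets are unbounded on one side. Returns a 1-based tablet number.
function tabletIndex(rowKey: string, splitKeys: string[]): number {
  let i = 0;
  while (i < splitKeys.length && rowKey >= splitKeys[i]) i++;
  return i + 1;
}

const splits = ['apple', 'customer_1', 'customer_2', 'other'];
for (const key of ['a', 'apple', 'custom', 'customer_1', 'customer_2', 'other', 'zz']) {
  console.log(`Tablet ${tabletIndex(key, splits)} <= "${key}"`);
}
// "a" -> 1, "apple"/"custom" -> 2, "customer_1" -> 3, "customer_2" -> 4, "other"/"zz" -> 5
```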
express-server/node_modules/google-proto-files/google/bigtable/admin/v2/bigtable_instance_admin.proto (generated, vendored, normal file, +456)
@@ -0,0 +1,456 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

syntax = "proto3";

package google.bigtable.admin.v2;

import "google/api/annotations.proto";
import "google/bigtable/admin/v2/instance.proto";
import "google/iam/v1/iam_policy.proto";
import "google/iam/v1/policy.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2";
option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
option java_multiple_files = true;
option java_outer_classname = "BigtableInstanceAdminProto";
option java_package = "com.google.bigtable.admin.v2";
option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2";


// Service for creating, configuring, and deleting Cloud Bigtable Instances and
// Clusters. Provides access to the Instance and Cluster schemas only, not the
// tables' metadata or data stored in those tables.
service BigtableInstanceAdmin {
  // Create an instance within a project.
  rpc CreateInstance(CreateInstanceRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*}/instances"
      body: "*"
    };
  }

  // Gets information about an instance.
  rpc GetInstance(GetInstanceRequest) returns (Instance) {
    option (google.api.http) = {
      get: "/v2/{name=projects/*/instances/*}"
    };
  }

  // Lists information about instances in a project.
  rpc ListInstances(ListInstancesRequest) returns (ListInstancesResponse) {
    option (google.api.http) = {
      get: "/v2/{parent=projects/*}/instances"
    };
  }

  // Updates an instance within a project.
  rpc UpdateInstance(Instance) returns (Instance) {
    option (google.api.http) = {
      put: "/v2/{name=projects/*/instances/*}"
      body: "*"
    };
  }

  // Partially updates an instance within a project.
  rpc PartialUpdateInstance(PartialUpdateInstanceRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v2/{instance.name=projects/*/instances/*}"
      body: "instance"
    };
  }

  // Delete an instance from a project.
  rpc DeleteInstance(DeleteInstanceRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      delete: "/v2/{name=projects/*/instances/*}"
    };
  }

  // Creates a cluster within an instance.
  rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/instances/*}/clusters"
      body: "cluster"
    };
  }

  // Gets information about a cluster.
  rpc GetCluster(GetClusterRequest) returns (Cluster) {
    option (google.api.http) = {
      get: "/v2/{name=projects/*/instances/*/clusters/*}"
    };
  }

  // Lists information about clusters in an instance.
  rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
    option (google.api.http) = {
      get: "/v2/{parent=projects/*/instances/*}/clusters"
    };
  }

  // Updates a cluster within an instance.
  rpc UpdateCluster(Cluster) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      put: "/v2/{name=projects/*/instances/*/clusters/*}"
      body: "*"
    };
  }

  // Deletes a cluster from an instance.
  rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      delete: "/v2/{name=projects/*/instances/*/clusters/*}"
    };
  }

  // Creates an app profile within an instance.
  rpc CreateAppProfile(CreateAppProfileRequest) returns (AppProfile) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/instances/*}/appProfiles"
      body: "app_profile"
    };
  }

  // Gets information about an app profile.
  rpc GetAppProfile(GetAppProfileRequest) returns (AppProfile) {
    option (google.api.http) = {
      get: "/v2/{name=projects/*/instances/*/appProfiles/*}"
    };
  }

  // Lists information about app profiles in an instance.
  rpc ListAppProfiles(ListAppProfilesRequest) returns (ListAppProfilesResponse) {
    option (google.api.http) = {
      get: "/v2/{parent=projects/*/instances/*}/appProfiles"
    };
  }

  // Updates an app profile within an instance.
  rpc UpdateAppProfile(UpdateAppProfileRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      patch: "/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}"
      body: "app_profile"
    };
  }

  // Deletes an app profile from an instance.
  rpc DeleteAppProfile(DeleteAppProfileRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      delete: "/v2/{name=projects/*/instances/*/appProfiles/*}"
    };
  }

  // Gets the access control policy for an instance resource. Returns an empty
  // policy if an instance exists but does not have a policy set.
  rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) {
    option (google.api.http) = {
      post: "/v2/{resource=projects/*/instances/*}:getIamPolicy"
      body: "*"
    };
  }

  // Sets the access control policy on an instance resource. Replaces any
  // existing policy.
  rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) {
    option (google.api.http) = {
      post: "/v2/{resource=projects/*/instances/*}:setIamPolicy"
      body: "*"
    };
  }

  // Returns permissions that the caller has on the specified instance resource.
  rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) {
    option (google.api.http) = {
      post: "/v2/{resource=projects/*/instances/*}:testIamPermissions"
      body: "*"
    };
  }
}

// Request message for BigtableInstanceAdmin.CreateInstance.
message CreateInstanceRequest {
  // The unique name of the project in which to create the new instance.
  // Values are of the form `projects/<project>`.
  string parent = 1;

  // The ID to be used when referring to the new instance within its project,
  // e.g., just `myinstance` rather than
  // `projects/myproject/instances/myinstance`.
  string instance_id = 2;

  // The instance to create.
  // Fields marked `OutputOnly` must be left blank.
  Instance instance = 3;

  // The clusters to be created within the instance, mapped by desired
  // cluster ID, e.g., just `mycluster` rather than
  // `projects/myproject/instances/myinstance/clusters/mycluster`.
  // Fields marked `OutputOnly` must be left blank.
  // Currently, at most two clusters can be specified.
  map<string, Cluster> clusters = 4;
}

// Request message for BigtableInstanceAdmin.GetInstance.
message GetInstanceRequest {
  // The unique name of the requested instance. Values are of the form
  // `projects/<project>/instances/<instance>`.
  string name = 1;
}

// Request message for BigtableInstanceAdmin.ListInstances.
message ListInstancesRequest {
  // The unique name of the project for which a list of instances is requested.
  // Values are of the form `projects/<project>`.
  string parent = 1;

  // DEPRECATED: This field is unused and ignored.
  string page_token = 2;
}

// Response message for BigtableInstanceAdmin.ListInstances.
message ListInstancesResponse {
  // The list of requested instances.
  repeated Instance instances = 1;

  // Locations from which Instance information could not be retrieved,
  // due to an outage or some other transient condition.
  // Instances whose Clusters are all in one of the failed locations
  // may be missing from `instances`, and Instances with at least one
  // Cluster in a failed location may only have partial information returned.
  // Values are of the form `projects/<project>/locations/<zone_id>`
  repeated string failed_locations = 2;

  // DEPRECATED: This field is unused and ignored.
  string next_page_token = 3;
}

// Request message for BigtableInstanceAdmin.PartialUpdateInstance.
message PartialUpdateInstanceRequest {
  // The Instance which will (partially) replace the current value.
  Instance instance = 1;

  // The subset of Instance fields which should be replaced.
  // Must be explicitly set.
  google.protobuf.FieldMask update_mask = 2;
}

// Request message for BigtableInstanceAdmin.DeleteInstance.
message DeleteInstanceRequest {
  // The unique name of the instance to be deleted.
  // Values are of the form `projects/<project>/instances/<instance>`.
  string name = 1;
}

// Request message for BigtableInstanceAdmin.CreateCluster.
message CreateClusterRequest {
  // The unique name of the instance in which to create the new cluster.
  // Values are of the form
  // `projects/<project>/instances/<instance>`.
  string parent = 1;

  // The ID to be used when referring to the new cluster within its instance,
  // e.g., just `mycluster` rather than
  // `projects/myproject/instances/myinstance/clusters/mycluster`.
  string cluster_id = 2;

  // The cluster to be created.
  // Fields marked `OutputOnly` must be left blank.
  Cluster cluster = 3;
}

// Request message for BigtableInstanceAdmin.GetCluster.
message GetClusterRequest {
  // The unique name of the requested cluster. Values are of the form
  // `projects/<project>/instances/<instance>/clusters/<cluster>`.
  string name = 1;
}

// Request message for BigtableInstanceAdmin.ListClusters.
message ListClustersRequest {
  // The unique name of the instance for which a list of clusters is requested.
  // Values are of the form `projects/<project>/instances/<instance>`.
  // Use `<instance> = '-'` to list Clusters for all Instances in a project,
  // e.g., `projects/myproject/instances/-`.
  string parent = 1;

  // DEPRECATED: This field is unused and ignored.
  string page_token = 2;
}

// Response message for BigtableInstanceAdmin.ListClusters.
message ListClustersResponse {
  // The list of requested clusters.
  repeated Cluster clusters = 1;

  // Locations from which Cluster information could not be retrieved,
  // due to an outage or some other transient condition.
  // Clusters from these locations may be missing from `clusters`,
  // or may only have partial information returned.
  // Values are of the form `projects/<project>/locations/<zone_id>`
  repeated string failed_locations = 2;

  // DEPRECATED: This field is unused and ignored.
  string next_page_token = 3;
}

// Request message for BigtableInstanceAdmin.DeleteCluster.
message DeleteClusterRequest {
  // The unique name of the cluster to be deleted. Values are of the form
  // `projects/<project>/instances/<instance>/clusters/<cluster>`.
  string name = 1;
}

// The metadata for the Operation returned by CreateInstance.
message CreateInstanceMetadata {
  // The request that prompted the initiation of this CreateInstance operation.
  CreateInstanceRequest original_request = 1;

  // The time at which the original request was received.
  google.protobuf.Timestamp request_time = 2;

  // The time at which the operation failed or was completed successfully.
  google.protobuf.Timestamp finish_time = 3;
}

// The metadata for the Operation returned by UpdateInstance.
message UpdateInstanceMetadata {
  // The request that prompted the initiation of this UpdateInstance operation.
  PartialUpdateInstanceRequest original_request = 1;

  // The time at which the original request was received.
  google.protobuf.Timestamp request_time = 2;

  // The time at which the operation failed or was completed successfully.
  google.protobuf.Timestamp finish_time = 3;
}

// The metadata for the Operation returned by CreateCluster.
message CreateClusterMetadata {
  // The request that prompted the initiation of this CreateCluster operation.
  CreateClusterRequest original_request = 1;

  // The time at which the original request was received.
  google.protobuf.Timestamp request_time = 2;

  // The time at which the operation failed or was completed successfully.
  google.protobuf.Timestamp finish_time = 3;
}

// The metadata for the Operation returned by UpdateCluster.
message UpdateClusterMetadata {
  // The request that prompted the initiation of this UpdateCluster operation.
  Cluster original_request = 1;

  // The time at which the original request was received.
  google.protobuf.Timestamp request_time = 2;

  // The time at which the operation failed or was completed successfully.
  google.protobuf.Timestamp finish_time = 3;
}

// Request message for BigtableInstanceAdmin.CreateAppProfile.
message CreateAppProfileRequest {
  // The unique name of the instance in which to create the new app profile.
  // Values are of the form
  // `projects/<project>/instances/<instance>`.
  string parent = 1;

  // The ID to be used when referring to the new app profile within its
  // instance, e.g., just `myprofile` rather than
  // `projects/myproject/instances/myinstance/appProfiles/myprofile`.
  string app_profile_id = 2;

  // The app profile to be created.
  // Fields marked `OutputOnly` will be ignored.
  AppProfile app_profile = 3;

  // If true, ignore safety checks when creating the app profile.
  bool ignore_warnings = 4;
}

// Request message for BigtableInstanceAdmin.GetAppProfile.
message GetAppProfileRequest {
  // The unique name of the requested app profile. Values are of the form
  // `projects/<project>/instances/<instance>/appProfiles/<app_profile>`.
  string name = 1;
}

// Request message for BigtableInstanceAdmin.ListAppProfiles.
message ListAppProfilesRequest {
  // The unique name of the instance for which a list of app profiles is
  // requested. Values are of the form
  // `projects/<project>/instances/<instance>`.
  // Use `<instance> = '-'` to list AppProfiles for all Instances in a project,
  // e.g., `projects/myproject/instances/-`.
  string parent = 1;

  // Maximum number of results per page.
  // CURRENTLY UNIMPLEMENTED AND IGNORED.
  int32 page_size = 3;

  // The value of `next_page_token` returned by a previous call.
  string page_token = 2;
}

// Response message for BigtableInstanceAdmin.ListAppProfiles.
message ListAppProfilesResponse {
  // The list of requested app profiles.
  repeated AppProfile app_profiles = 1;

  // Set if not all app profiles could be returned in a single response.
  // Pass this value to `page_token` in another request to get the next
  // page of results.
  string next_page_token = 2;

  // Locations from which AppProfile information could not be retrieved,
  // due to an outage or some other transient condition.
  // AppProfiles from these locations may be missing from `app_profiles`.
  // Values are of the form `projects/<project>/locations/<zone_id>`
  repeated string failed_locations = 3;
}

// Request message for BigtableInstanceAdmin.UpdateAppProfile.
message UpdateAppProfileRequest {
  // The app profile which will (partially) replace the current value.
  AppProfile app_profile = 1;

  // The subset of app profile fields which should be replaced.
  // If unset, all fields will be replaced.
  google.protobuf.FieldMask update_mask = 2;

  // If true, ignore safety checks when updating the app profile.
  bool ignore_warnings = 3;
}


// Request message for BigtableInstanceAdmin.DeleteAppProfile.
message DeleteAppProfileRequest {
  // The unique name of the app profile to be deleted. Values are of the form
  // `projects/<project>/instances/<instance>/appProfiles/<app_profile>`.
  string name = 1;

  // If true, ignore safety checks when deleting the app profile.
  bool ignore_warnings = 2;
}

// The metadata for the Operation returned by UpdateAppProfile.
message UpdateAppProfileMetadata {

}
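As a rough sketch of how the v2 admin service above could be consumed from this express-server project using only the vendored protos. The node_modules path, project and instance names are assumptions, and real calls also need OAuth call credentials; the official @google-cloud/bigtable client would normally be used instead.

```ts
// Illustrative only: build a raw gRPC stub for BigtableInstanceAdmin from the
// vendored proto files and issue a GetInstance call.
import * as grpc from '@grpc/grpc-js';
import * as protoLoader from '@grpc/proto-loader';
import * as path from 'path';

// Assumed layout: the vendored protos live under node_modules/google-proto-files.
const protoRoot = path.join(__dirname, 'node_modules/google-proto-files');

const packageDefinition = protoLoader.loadSync(
  'google/bigtable/admin/v2/bigtable_instance_admin.proto',
  { includeDirs: [protoRoot], keepCase: true, longs: String, enums: String, defaults: true }
);

const adminV2 = (grpc.loadPackageDefinition(packageDefinition) as any)
  .google.bigtable.admin.v2;

const client = new adminV2.BigtableInstanceAdmin(
  'bigtableadmin.googleapis.com:443',
  grpc.credentials.createSsl() // TLS only; production calls also need OAuth credentials
);

client.GetInstance(
  { name: 'projects/my-project/instances/my-instance' }, // hypothetical resource name
  (err: Error | null, instance: unknown) => {
    if (err) console.error(err);
    else console.log(instance);
  }
);
```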
525
express-server/node_modules/google-proto-files/google/bigtable/admin/v2/bigtable_table_admin.proto
generated
vendored
Normal file
525
express-server/node_modules/google-proto-files/google/bigtable/admin/v2/bigtable_table_admin.proto
generated
vendored
Normal file
@ -0,0 +1,525 @@
|
||||
// Copyright 2018 Google LLC.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.bigtable.admin.v2;
|
||||
|
||||
import "google/api/annotations.proto";
|
||||
import "google/bigtable/admin/v2/table.proto";
|
||||
import "google/longrunning/operations.proto";
|
||||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2";
|
||||
option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "BigtableTableAdminProto";
|
||||
option java_package = "com.google.bigtable.admin.v2";
|
||||
option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2";
|
||||
|
||||
|
||||
// Service for creating, configuring, and deleting Cloud Bigtable tables.
|
||||
//
|
||||
//
|
||||
// Provides access to the table schemas only, not the data stored within
|
||||
// the tables.
|
||||
service BigtableTableAdmin {
|
||||
// Creates a new table in the specified instance.
|
||||
// The table can be created with a full set of initial column families,
|
||||
// specified in the request.
|
||||
rpc CreateTable(CreateTableRequest) returns (Table) {
|
||||
option (google.api.http) = {
|
||||
post: "/v2/{parent=projects/*/instances/*}/tables"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Creates a new table from the specified snapshot. The target table must
|
||||
// not exist. The snapshot and the table must be in the same instance.
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not
|
||||
// recommended for production use. It is not subject to any SLA or deprecation
|
||||
// policy.
|
||||
rpc CreateTableFromSnapshot(CreateTableFromSnapshotRequest) returns (google.longrunning.Operation) {
|
||||
option (google.api.http) = {
|
||||
post: "/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Lists all tables served from a specified instance.
|
||||
rpc ListTables(ListTablesRequest) returns (ListTablesResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/v2/{parent=projects/*/instances/*}/tables"
|
||||
};
|
||||
}
|
||||
|
||||
// Gets metadata information about the specified table.
|
||||
rpc GetTable(GetTableRequest) returns (Table) {
|
||||
option (google.api.http) = {
|
||||
get: "/v2/{name=projects/*/instances/*/tables/*}"
|
||||
};
|
||||
}
|
||||
|
||||
// Permanently deletes a specified table and all of its data.
|
||||
rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) {
|
||||
option (google.api.http) = {
|
||||
delete: "/v2/{name=projects/*/instances/*/tables/*}"
|
||||
};
|
||||
}
|
||||
|
||||
// Performs a series of column family modifications on the specified table.
|
||||
// Either all or none of the modifications will occur before this method
|
||||
// returns, but data requests received prior to that point may see a table
|
||||
// where only some modifications have taken effect.
|
||||
rpc ModifyColumnFamilies(ModifyColumnFamiliesRequest) returns (Table) {
|
||||
option (google.api.http) = {
|
||||
post: "/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Permanently drop/delete a row range from a specified table. The request can
|
||||
// specify whether to delete all rows in a table, or only those that match a
|
||||
// particular prefix.
|
||||
rpc DropRowRange(DropRowRangeRequest) returns (google.protobuf.Empty) {
|
||||
option (google.api.http) = {
|
||||
post: "/v2/{name=projects/*/instances/*/tables/*}:dropRowRange"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Generates a consistency token for a Table, which can be used in
|
||||
// CheckConsistency to check whether mutations to the table that finished
|
||||
// before this call started have been replicated. The tokens will be available
|
||||
// for 90 days.
|
||||
rpc GenerateConsistencyToken(GenerateConsistencyTokenRequest) returns (GenerateConsistencyTokenResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Checks replication consistency based on a consistency token, that is, if
|
||||
// replication has caught up based on the conditions specified in the token
|
||||
// and the check request.
|
||||
rpc CheckConsistency(CheckConsistencyRequest) returns (CheckConsistencyResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/v2/{name=projects/*/instances/*/tables/*}:checkConsistency"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Creates a new snapshot in the specified cluster from the specified
|
||||
// source table. The cluster and the table must be in the same instance.
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not
|
||||
// recommended for production use. It is not subject to any SLA or deprecation
|
||||
// policy.
|
||||
rpc SnapshotTable(SnapshotTableRequest) returns (google.longrunning.Operation) {
|
||||
option (google.api.http) = {
|
||||
post: "/v2/{name=projects/*/instances/*/tables/*}:snapshot"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
|
||||
// Gets metadata information about the specified snapshot.
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not
|
||||
// recommended for production use. It is not subject to any SLA or deprecation
|
||||
// policy.
|
||||
rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) {
|
||||
option (google.api.http) = {
|
||||
get: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}"
|
||||
};
|
||||
}
|
||||
|
||||
// Lists all snapshots associated with the specified cluster.
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not
|
||||
// recommended for production use. It is not subject to any SLA or deprecation
|
||||
// policy.
|
||||
rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/v2/{parent=projects/*/instances/*/clusters/*}/snapshots"
|
||||
};
|
||||
}
|
||||
|
||||
// Permanently deletes the specified snapshot.
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not
|
||||
// recommended for production use. It is not subject to any SLA or deprecation
|
||||
// policy.
|
||||
rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) {
|
||||
option (google.api.http) = {
|
||||
delete: "/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}"
|
||||
};
|
||||
}
|
||||
}
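The RPCs above map one-to-one onto methods of a gRPC stub. As a rough sketch of how this vendored file could be driven from the surrounding Node project, the snippet below loads it with @grpc/proto-loader and lists the tables of one instance; the proto directory, the project and instance names, and the bare SSL credentials (real calls also need OAuth2 call credentials) are placeholder assumptions, not part of this file.

import * as path from 'node:path';
import * as grpc from '@grpc/grpc-js';
import * as protoLoader from '@grpc/proto-loader';

// Root of the vendored proto tree, so imports such as google/api/annotations.proto resolve.
const PROTO_DIR = path.join(process.cwd(), 'node_modules/google-proto-files');

const definition = protoLoader.loadSync(
  path.join(PROTO_DIR, 'google/bigtable/admin/v2/bigtable_table_admin.proto'),
  { keepCase: true, defaults: true, oneofs: true, includeDirs: [PROTO_DIR] }
);
const proto = grpc.loadPackageDefinition(definition) as any;

// NOTE: production calls also require OAuth2 call credentials; omitted here.
const client = new proto.google.bigtable.admin.v2.BigtableTableAdmin(
  'bigtableadmin.googleapis.com:443',
  grpc.credentials.createSsl()
);

client.listTables(
  { parent: 'projects/my-project/instances/my-instance', view: 'NAME_ONLY' },
  (err: Error | null, res: { tables: Array<{ name: string }>; next_page_token: string }) => {
    if (err) throw err;
    res.tables.forEach((t) => console.log(t.name));
  }
);

Because keepCase is set, request and response fields keep the snake_case names used in this file; the later payload examples assume the same.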
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable]
|
||||
message CreateTableRequest {
|
||||
// An initial split point for a newly created table.
|
||||
message Split {
|
||||
// Row key to use as an initial tablet boundary.
|
||||
bytes key = 1;
|
||||
}
|
||||
|
||||
// The unique name of the instance in which to create the table.
|
||||
// Values are of the form `projects/<project>/instances/<instance>`.
|
||||
string parent = 1;
|
||||
|
||||
// The name by which the new table should be referred to within the parent
|
||||
// instance, e.g., `foobar` rather than `<parent>/tables/foobar`.
|
||||
string table_id = 2;
|
||||
|
||||
// The Table to create.
|
||||
Table table = 3;
|
||||
|
||||
// The optional list of row keys that will be used to initially split the
|
||||
// table into several tablets (tablets are similar to HBase regions).
|
||||
// Given two split keys, `s1` and `s2`, three tablets will be created,
|
||||
// spanning the key ranges: `[, s1), [s1, s2), [s2, )`.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",`
|
||||
// `"other", "zz"]`
|
||||
// * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]`
|
||||
// * Key assignment:
|
||||
// - Tablet 1 `[, apple) => {"a"}.`
|
||||
// - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.`
|
||||
// - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.`
|
||||
// - Tablet 4 `[customer_2, other) => {"customer_2"}.`
|
||||
// - Tablet 5 `[other, ) => {"other", "zz"}.`
|
||||
repeated Split initial_splits = 4;
|
||||
}
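To make the split-key example above concrete, here is what a CreateTableRequest payload could look like when sent through a stub loaded with keepCase (snake_case fields), as in the earlier sketch; the project, instance, table, and column family names are placeholders.

const createTableRequest = {
  parent: 'projects/my-project/instances/my-instance',
  table_id: 'customers',
  table: {
    // One column family, keeping at most three versions per cell.
    column_families: { cf1: { gc_rule: { max_num_versions: 3 } } },
  },
  // The four split keys from the example above yield five initial tablets:
  // [, apple), [apple, customer_1), [customer_1, customer_2), [customer_2, other), [other, ).
  initial_splits: [
    { key: Buffer.from('apple') },
    { key: Buffer.from('customer_1') },
    { key: Buffer.from('customer_2') },
    { key: Buffer.from('other') },
  ],
};
// e.g. client.createTable(createTableRequest, (err, table) => { ... });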
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot]
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not recommended
|
||||
// for production use. It is not subject to any SLA or deprecation policy.
|
||||
message CreateTableFromSnapshotRequest {
|
||||
// The unique name of the instance in which to create the table.
|
||||
// Values are of the form `projects/<project>/instances/<instance>`.
|
||||
string parent = 1;
|
||||
|
||||
// The name by which the new table should be referred to within the parent
|
||||
// instance, e.g., `foobar` rather than `<parent>/tables/foobar`.
|
||||
string table_id = 2;
|
||||
|
||||
// The unique name of the snapshot from which to restore the table. The
|
||||
// snapshot and the table must be in the same instance.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/clusters/<cluster>/snapshots/<snapshot>`.
|
||||
string source_snapshot = 3;
|
||||
}
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange]
|
||||
message DropRowRangeRequest {
|
||||
// The unique name of the table on which to drop a range of rows.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/tables/<table>`.
|
||||
string name = 1;
|
||||
|
||||
// The rows to delete: either all rows in the table, or only those matching a row key prefix.
|
||||
oneof target {
|
||||
// Delete all rows that start with this row key prefix. Prefix cannot be
|
||||
// zero length.
|
||||
bytes row_key_prefix = 2;
|
||||
|
||||
// Delete all rows in the table. Setting this to false is a no-op.
|
||||
bool delete_all_data_from_table = 3;
|
||||
}
|
||||
}
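Since `target` is a oneof, a request sets exactly one of the two fields. Two hypothetical payloads (the table name and prefix are placeholders), again assuming snake_case fields:

// Delete every row whose key starts with "customer_".
const dropByPrefix = {
  name: 'projects/my-project/instances/my-instance/tables/customers',
  row_key_prefix: Buffer.from('customer_'),
};

// The other arm of the oneof: wipe the whole table but keep its schema.
const truncateTable = {
  name: 'projects/my-project/instances/my-instance/tables/customers',
  delete_all_data_from_table: true,
};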
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
|
||||
message ListTablesRequest {
|
||||
// The unique name of the instance for which tables should be listed.
|
||||
// Values are of the form `projects/<project>/instances/<instance>`.
|
||||
string parent = 1;
|
||||
|
||||
// The view to be applied to the returned tables' fields.
|
||||
// Defaults to `NAME_ONLY` if unspecified; no others are currently supported.
|
||||
Table.View view = 2;
|
||||
|
||||
// Maximum number of results per page.
|
||||
// CURRENTLY UNIMPLEMENTED AND IGNORED.
|
||||
int32 page_size = 4;
|
||||
|
||||
// The value of `next_page_token` returned by a previous call.
|
||||
string page_token = 3;
|
||||
}
|
||||
|
||||
// Response message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables]
|
||||
message ListTablesResponse {
|
||||
// The tables present in the requested instance.
|
||||
repeated Table tables = 1;
|
||||
|
||||
// Set if not all tables could be returned in a single response.
|
||||
// Pass this value to `page_token` in another request to get the next
|
||||
// page of results.
|
||||
string next_page_token = 2;
|
||||
}
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable]
|
||||
message GetTableRequest {
|
||||
// The unique name of the requested table.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/tables/<table>`.
|
||||
string name = 1;
|
||||
|
||||
// The view to be applied to the returned table's fields.
|
||||
// Defaults to `SCHEMA_VIEW` if unspecified.
|
||||
Table.View view = 2;
|
||||
}
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable]
|
||||
message DeleteTableRequest {
|
||||
// The unique name of the table to be deleted.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/tables/<table>`.
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies]
|
||||
message ModifyColumnFamiliesRequest {
|
||||
// A create, update, or delete of a particular column family.
|
||||
message Modification {
|
||||
// The ID of the column family to be modified.
|
||||
string id = 1;
|
||||
|
||||
// Column family modifications.
|
||||
oneof mod {
|
||||
// Create a new column family with the specified schema, or fail if
|
||||
// one already exists with the given ID.
|
||||
ColumnFamily create = 2;
|
||||
|
||||
// Update an existing column family to the specified schema, or fail
|
||||
// if no column family exists with the given ID.
|
||||
ColumnFamily update = 3;
|
||||
|
||||
// Drop (delete) the column family with the given ID, or fail if no such
|
||||
// family exists.
|
||||
bool drop = 4;
|
||||
}
|
||||
}
|
||||
|
||||
// The unique name of the table whose families should be modified.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/tables/<table>`.
|
||||
string name = 1;
|
||||
|
||||
// Modifications to be atomically applied to the specified table's families.
|
||||
// Entries are applied in order, meaning that earlier modifications can be
|
||||
// masked by later ones (in the case of repeated updates to the same family,
|
||||
// for example).
|
||||
repeated Modification modifications = 2;
|
||||
}
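A hypothetical ModifyColumnFamiliesRequest combining all three kinds of Modification; the entries are applied in order and atomically, as described above (all names are placeholders):

const modifyColumnFamiliesRequest = {
  name: 'projects/my-project/instances/my-instance/tables/customers',
  modifications: [
    // Create a new family keeping at most five versions per cell.
    { id: 'cf_new', create: { gc_rule: { max_num_versions: 5 } } },
    // Tighten an existing family's GC rule to a one-day max age.
    { id: 'cf_events', update: { gc_rule: { max_age: { seconds: 86400 } } } },
    // Remove an obsolete family and all of its data.
    { id: 'cf_legacy', drop: true },
  ],
};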
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken]
|
||||
message GenerateConsistencyTokenRequest {
|
||||
// The unique name of the Table for which to create a consistency token.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/tables/<table>`.
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// Response message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken]
|
||||
message GenerateConsistencyTokenResponse {
|
||||
// The generated consistency token.
|
||||
string consistency_token = 1;
|
||||
}
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency]
|
||||
message CheckConsistencyRequest {
|
||||
// The unique name of the Table for which to check replication consistency.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/tables/<table>`.
|
||||
string name = 1;
|
||||
|
||||
// The token created using GenerateConsistencyToken for the Table.
|
||||
string consistency_token = 2;
|
||||
}
|
||||
|
||||
// Response message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency]
|
||||
message CheckConsistencyResponse {
|
||||
// True only if the token is consistent. A token is consistent if replication
|
||||
// has caught up with the restrictions specified in the request.
|
||||
bool consistent = 1;
|
||||
}
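Together, GenerateConsistencyToken and CheckConsistency give a simple way to wait until replication has caught up with all writes that finished before the token was requested. A rough sketch, assuming the callback-style stub from the earlier example; the one-second poll interval is arbitrary:

import { promisify } from 'node:util';

async function waitForReplication(client: any, tableName: string): Promise<void> {
  const generate = promisify(client.generateConsistencyToken.bind(client));
  const check = promisify(client.checkConsistency.bind(client));

  // The token covers all mutations that finished before this call started.
  const { consistency_token } = await generate({ name: tableName });

  // Poll until every cluster has caught up.
  for (;;) {
    const { consistent } = await check({ name: tableName, consistency_token });
    if (consistent) return;
    await new Promise((resolve) => setTimeout(resolve, 1000));
  }
}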
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable]
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not recommended
|
||||
// for production use. It is not subject to any SLA or deprecation policy.
|
||||
message SnapshotTableRequest {
|
||||
// The unique name of the table to have the snapshot taken.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/tables/<table>`.
|
||||
string name = 1;
|
||||
|
||||
// The name of the cluster in which the snapshot will be created.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/clusters/<cluster>`.
|
||||
string cluster = 2;
|
||||
|
||||
// The ID by which the new snapshot should be referred to within the parent
|
||||
// cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*`
|
||||
// rather than
|
||||
// `projects/<project>/instances/<instance>/clusters/<cluster>/snapshots/mysnapshot`.
|
||||
string snapshot_id = 3;
|
||||
|
||||
// The amount of time that the new snapshot can stay active after it is
|
||||
// created. Once 'ttl' expires, the snapshot will get deleted. The maximum
|
||||
// amount of time a snapshot can stay active is 7 days. If 'ttl' is not
|
||||
// specified, the default value of 24 hours will be used.
|
||||
google.protobuf.Duration ttl = 4;
|
||||
|
||||
// Description of the snapshot.
|
||||
string description = 5;
|
||||
}
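A hypothetical SnapshotTableRequest; `ttl` is a google.protobuf.Duration, expressed here as a seconds object. SnapshotTable returns a long-running Operation that the caller would then poll for completion (all resource names are placeholders):

const snapshotTableRequest = {
  name: 'projects/my-project/instances/my-instance/tables/customers',
  cluster: 'projects/my-project/instances/my-instance/clusters/my-cluster',
  snapshot_id: 'pre-migration-backup',
  // Keep the snapshot for three days (default is 24 hours, maximum is 7 days).
  ttl: { seconds: 3 * 24 * 60 * 60 },
  description: 'Taken before the schema migration',
};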
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot]
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not recommended
|
||||
// for production use. It is not subject to any SLA or deprecation policy.
|
||||
message GetSnapshotRequest {
|
||||
// The unique name of the requested snapshot.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/clusters/<cluster>/snapshots/<snapshot>`.
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots]
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not recommended
|
||||
// for production use. It is not subject to any SLA or deprecation policy.
|
||||
message ListSnapshotsRequest {
|
||||
// The unique name of the cluster for which snapshots should be listed.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/clusters/<cluster>`.
|
||||
// Use `<cluster> = '-'` to list snapshots for all clusters in an instance,
|
||||
// e.g., `projects/<project>/instances/<instance>/clusters/-`.
|
||||
string parent = 1;
|
||||
|
||||
// The maximum number of snapshots to return per page.
|
||||
// CURRENTLY UNIMPLEMENTED AND IGNORED.
|
||||
int32 page_size = 2;
|
||||
|
||||
// The value of `next_page_token` returned by a previous call.
|
||||
string page_token = 3;
|
||||
}
|
||||
|
||||
// Response message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots]
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not recommended
|
||||
// for production use. It is not subject to any SLA or deprecation policy.
|
||||
message ListSnapshotsResponse {
|
||||
// The snapshots present in the requested cluster.
|
||||
repeated Snapshot snapshots = 1;
|
||||
|
||||
// Set if not all snapshots could be returned in a single response.
|
||||
// Pass this value to `page_token` in another request to get the next
|
||||
// page of results.
|
||||
string next_page_token = 2;
|
||||
}
|
||||
|
||||
// Request message for
|
||||
// [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot]
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not recommended
|
||||
// for production use. It is not subject to any SLA or deprecation policy.
|
||||
message DeleteSnapshotRequest {
|
||||
// The unique name of the snapshot to be deleted.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/clusters/<cluster>/snapshots/<snapshot>`.
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
// The metadata for the Operation returned by SnapshotTable.
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not recommended
|
||||
// for production use. It is not subject to any SLA or deprecation policy.
|
||||
message SnapshotTableMetadata {
|
||||
// The request that prompted the initiation of this SnapshotTable operation.
|
||||
SnapshotTableRequest original_request = 1;
|
||||
|
||||
// The time at which the original request was received.
|
||||
google.protobuf.Timestamp request_time = 2;
|
||||
|
||||
// The time at which the operation failed or was completed successfully.
|
||||
google.protobuf.Timestamp finish_time = 3;
|
||||
}
|
||||
|
||||
// The metadata for the Operation returned by CreateTableFromSnapshot.
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not recommended
|
||||
// for production use. It is not subject to any SLA or deprecation policy.
|
||||
message CreateTableFromSnapshotMetadata {
|
||||
// The request that prompted the initiation of this CreateTableFromSnapshot
|
||||
// operation.
|
||||
CreateTableFromSnapshotRequest original_request = 1;
|
||||
|
||||
// The time at which the original request was received.
|
||||
google.protobuf.Timestamp request_time = 2;
|
||||
|
||||
// The time at which the operation failed or was completed successfully.
|
||||
google.protobuf.Timestamp finish_time = 3;
|
||||
}
|
41
express-server/node_modules/google-proto-files/google/bigtable/admin/v2/common.proto
generated
vendored
Normal file
41
express-server/node_modules/google-proto-files/google/bigtable/admin/v2/common.proto
generated
vendored
Normal file
@ -0,0 +1,41 @@
// Copyright 2018 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

syntax = "proto3";

package google.bigtable.admin.v2;

import "google/api/annotations.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2";
option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
option java_multiple_files = true;
option java_outer_classname = "CommonProto";
option java_package = "com.google.bigtable.admin.v2";
option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2";


// Storage media types for persisting Bigtable data.
enum StorageType {
// The user did not specify a storage type.
STORAGE_TYPE_UNSPECIFIED = 0;

// Flash (SSD) storage should be used.
SSD = 1;

// Magnetic drive (HDD) storage should be used.
HDD = 2;
}
208
express-server/node_modules/google-proto-files/google/bigtable/admin/v2/instance.proto
generated
vendored
Normal file
208
express-server/node_modules/google-proto-files/google/bigtable/admin/v2/instance.proto
generated
vendored
Normal file
@ -0,0 +1,208 @@
|
||||
// Copyright 2018 Google LLC.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.bigtable.admin.v2;
|
||||
|
||||
import "google/api/annotations.proto";
|
||||
import "google/bigtable/admin/v2/common.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2";
|
||||
option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "InstanceProto";
|
||||
option java_package = "com.google.bigtable.admin.v2";
|
||||
option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2";
|
||||
|
||||
|
||||
// A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and
|
||||
// the resources that serve them.
|
||||
// All tables in an instance are served from a single
|
||||
// [Cluster][google.bigtable.admin.v2.Cluster].
|
||||
message Instance {
|
||||
// Possible states of an instance.
|
||||
enum State {
|
||||
// The state of the instance could not be determined.
|
||||
STATE_NOT_KNOWN = 0;
|
||||
|
||||
// The instance has been successfully created and can serve requests
|
||||
// to its tables.
|
||||
READY = 1;
|
||||
|
||||
// The instance is currently being created, and may be destroyed
|
||||
// if the creation process encounters an error.
|
||||
CREATING = 2;
|
||||
}
|
||||
|
||||
// The type of the instance.
|
||||
enum Type {
|
||||
// The type of the instance is unspecified. If set when creating an
|
||||
// instance, a `PRODUCTION` instance will be created. If set when updating
|
||||
// an instance, the type will be left unchanged.
|
||||
TYPE_UNSPECIFIED = 0;
|
||||
|
||||
// An instance meant for production use. `serve_nodes` must be set
|
||||
// on the cluster.
|
||||
PRODUCTION = 1;
|
||||
|
||||
// The instance is meant for development and testing purposes only; it has
|
||||
// no performance or uptime guarantees and is not covered by SLA.
|
||||
// After a development instance is created, it can be upgraded by
|
||||
// updating the instance to type `PRODUCTION`. An instance created
|
||||
// as a production instance cannot be changed to a development instance.
|
||||
// When creating a development instance, `serve_nodes` on the cluster must
|
||||
// not be set.
|
||||
DEVELOPMENT = 2;
|
||||
}
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the instance. Values are of the form
|
||||
// `projects/<project>/instances/[a-z][a-z0-9\\-]+[a-z0-9]`.
|
||||
string name = 1;
|
||||
|
||||
// The descriptive name for this instance as it appears in UIs.
|
||||
// Can be changed at any time, but should be kept globally unique
|
||||
// to avoid confusion.
|
||||
string display_name = 2;
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The current state of the instance.
|
||||
State state = 3;
|
||||
|
||||
// The type of the instance. Defaults to `PRODUCTION`.
|
||||
Type type = 4;
|
||||
|
||||
// Labels are a flexible and lightweight mechanism for organizing cloud
|
||||
// resources into groups that reflect a customer's organizational needs and
|
||||
// deployment strategies. They can be used to filter resources and aggregate
|
||||
// metrics.
|
||||
//
|
||||
// * Label keys must be between 1 and 63 characters long and must conform to
|
||||
// the regular expression: `[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}`.
|
||||
// * Label values must be between 0 and 63 characters long and must conform to
|
||||
// the regular expression: `[\p{Ll}\p{Lo}\p{N}_-]{0,63}`.
|
||||
// * No more than 64 labels can be associated with a given resource.
|
||||
// * Keys and values must both be under 128 bytes.
|
||||
map<string, string> labels = 5;
|
||||
}
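A hypothetical Instance payload, as it might be embedded in an instance-admin create request defined elsewhere in this package; only client-settable fields are included, and the label keys and values respect the constraints listed above (all values are placeholders):

const instance = {
  display_name: 'Orders backend',
  type: 'PRODUCTION',
  // Lowercase letters, digits, '_' and '-' only, per the regexes above.
  labels: { env: 'prod', team: 'payments' },
  // `name` and `state` are output-only and filled in by the service.
};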
|
||||
|
||||
// A resizable group of nodes in a particular cloud location, capable
|
||||
// of serving all [Tables][google.bigtable.admin.v2.Table] in the parent
|
||||
// [Instance][google.bigtable.admin.v2.Instance].
|
||||
message Cluster {
|
||||
// Possible states of a cluster.
|
||||
enum State {
|
||||
// The state of the cluster could not be determined.
|
||||
STATE_NOT_KNOWN = 0;
|
||||
|
||||
// The cluster has been successfully created and is ready to serve requests.
|
||||
READY = 1;
|
||||
|
||||
// The cluster is currently being created, and may be destroyed
|
||||
// if the creation process encounters an error.
|
||||
// A cluster may not be able to serve requests while being created.
|
||||
CREATING = 2;
|
||||
|
||||
// The cluster is currently being resized, and may revert to its previous
|
||||
// node count if the process encounters an error.
|
||||
// A cluster is still capable of serving requests while being resized,
|
||||
// but may exhibit performance as if its number of allocated nodes is
|
||||
// between the starting and requested states.
|
||||
RESIZING = 3;
|
||||
|
||||
// The cluster has no backing nodes. The data (tables) still
|
||||
// exist, but no operations can be performed on the cluster.
|
||||
DISABLED = 4;
|
||||
}
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the cluster. Values are of the form
|
||||
// `projects/<project>/instances/<instance>/clusters/[a-z][-a-z0-9]*`.
|
||||
string name = 1;
|
||||
|
||||
// (`CreationOnly`)
|
||||
// The location where this cluster's nodes and storage reside. For best
|
||||
// performance, clients should be located as close as possible to this
|
||||
// cluster. Currently only zones are supported, so values should be of the
|
||||
// form `projects/<project>/locations/<zone>`.
|
||||
string location = 2;
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The current state of the cluster.
|
||||
State state = 3;
|
||||
|
||||
// The number of nodes allocated to this cluster. More nodes enable higher
|
||||
// throughput and more consistent performance.
|
||||
int32 serve_nodes = 4;
|
||||
|
||||
// (`CreationOnly`)
|
||||
// The type of storage used by this cluster to serve its
|
||||
// parent instance's tables, unless explicitly overridden.
|
||||
StorageType default_storage_type = 5;
|
||||
}
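A matching Cluster payload for a production instance; `location` is a zone-level resource name and `default_storage_type` takes a value from the StorageType enum in common.proto (all identifiers are placeholders):

const cluster = {
  location: 'projects/my-project/locations/us-east1-b',
  serve_nodes: 3,              // required for PRODUCTION, must be unset for DEVELOPMENT
  default_storage_type: 'SSD', // StorageType from common.proto
  // `name` and `state` are output-only.
};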
|
||||
|
||||
// A configuration object describing how Cloud Bigtable should treat traffic
|
||||
// from a particular end user application.
|
||||
message AppProfile {
|
||||
// Read/write requests may be routed to any cluster in the instance, and will
|
||||
// fail over to another cluster in the event of transient errors or delays.
|
||||
// Choosing this option sacrifices read-your-writes consistency to improve
|
||||
// availability.
|
||||
message MultiClusterRoutingUseAny {
|
||||
|
||||
}
|
||||
|
||||
// Unconditionally routes all read/write requests to a specific cluster.
|
||||
// This option preserves read-your-writes consistency, but does not improve
|
||||
// availability.
|
||||
message SingleClusterRouting {
|
||||
// The cluster to which read/write requests should be routed.
|
||||
string cluster_id = 1;
|
||||
|
||||
// Whether or not `CheckAndMutateRow` and `ReadModifyWriteRow` requests are
|
||||
// allowed by this app profile. It is unsafe to send these requests to
|
||||
// the same table/row/column in multiple clusters.
|
||||
bool allow_transactional_writes = 2;
|
||||
}
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the app profile. Values are of the form
|
||||
// `projects/<project>/instances/<instance>/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`.
|
||||
string name = 1;
|
||||
|
||||
// Strongly validated etag for optimistic concurrency control. Preserve the
|
||||
// value returned from `GetAppProfile` when calling `UpdateAppProfile` to
|
||||
// fail the request if there has been a modification in the meantime. The
|
||||
// `update_mask` of the request need not include `etag` for this protection
|
||||
// to apply.
|
||||
// See [Wikipedia](https://en.wikipedia.org/wiki/HTTP_ETag) and
|
||||
// [RFC 7232](https://tools.ietf.org/html/rfc7232#section-2.3) for more
|
||||
// details.
|
||||
string etag = 2;
|
||||
|
||||
// Optional long form description of the use case for this AppProfile.
|
||||
string description = 3;
|
||||
|
||||
// The routing policy for all read/write requests which use this app profile.
|
||||
// A value must be explicitly set.
|
||||
oneof routing_policy {
|
||||
// Use a multi-cluster routing policy that may pick any cluster.
|
||||
MultiClusterRoutingUseAny multi_cluster_routing_use_any = 5;
|
||||
|
||||
// Use a single-cluster routing policy.
|
||||
SingleClusterRouting single_cluster_routing = 6;
|
||||
}
|
||||
}
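Because `routing_policy` is a oneof, an AppProfile picks exactly one of the two policies. Two hypothetical profiles illustrating the trade-off described above (cluster ID and descriptions are placeholders):

// Favors availability: any cluster may serve, read-your-writes is not guaranteed.
const batchProfile = {
  description: 'Analytics and batch reads',
  multi_cluster_routing_use_any: {},
};

// Favors consistency: pin traffic to one cluster and allow single-row transactions.
const servingProfile = {
  description: 'Low-latency serving path',
  single_cluster_routing: {
    cluster_id: 'my-cluster',
    allow_transactional_writes: true,
  },
};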
|
221
express-server/node_modules/google-proto-files/google/bigtable/admin/v2/table.proto
generated
vendored
Normal file
221
express-server/node_modules/google-proto-files/google/bigtable/admin/v2/table.proto
generated
vendored
Normal file
@ -0,0 +1,221 @@
|
||||
// Copyright 2018 Google LLC.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.bigtable.admin.v2;
|
||||
|
||||
import "google/api/annotations.proto";
|
||||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option csharp_namespace = "Google.Cloud.Bigtable.Admin.V2";
|
||||
option go_package = "google.golang.org/genproto/googleapis/bigtable/admin/v2;admin";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "TableProto";
|
||||
option java_package = "com.google.bigtable.admin.v2";
|
||||
option php_namespace = "Google\\Cloud\\Bigtable\\Admin\\V2";
|
||||
|
||||
|
||||
// A collection of user data indexed by row, column, and timestamp.
|
||||
// Each table is served using the resources of its parent cluster.
|
||||
message Table {
|
||||
// The state of a table's data in a particular cluster.
|
||||
message ClusterState {
|
||||
// Table replication states.
|
||||
enum ReplicationState {
|
||||
// The replication state of the table is unknown in this cluster.
|
||||
STATE_NOT_KNOWN = 0;
|
||||
|
||||
// The cluster was recently created, and the table must finish copying
|
||||
// over pre-existing data from other clusters before it can begin
|
||||
// receiving live replication updates and serving Data API requests.
|
||||
INITIALIZING = 1;
|
||||
|
||||
// The table is temporarily unable to serve Data API requests from this
|
||||
// cluster due to planned internal maintenance.
|
||||
PLANNED_MAINTENANCE = 2;
|
||||
|
||||
// The table is temporarily unable to serve Data API requests from this
|
||||
// cluster due to unplanned or emergency maintenance.
|
||||
UNPLANNED_MAINTENANCE = 3;
|
||||
|
||||
// The table can serve Data API requests from this cluster. Depending on
|
||||
// replication delay, reads may not immediately reflect the state of the
|
||||
// table in other clusters.
|
||||
READY = 4;
|
||||
}
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The state of replication for the table in this cluster.
|
||||
ReplicationState replication_state = 1;
|
||||
}
|
||||
|
||||
// Possible timestamp granularities to use when keeping multiple versions
|
||||
// of data in a table.
|
||||
enum TimestampGranularity {
|
||||
// The user did not specify a granularity. Should not be returned.
|
||||
// When specified during table creation, MILLIS will be used.
|
||||
TIMESTAMP_GRANULARITY_UNSPECIFIED = 0;
|
||||
|
||||
// The table keeps data versioned at a granularity of 1ms.
|
||||
MILLIS = 1;
|
||||
}
|
||||
|
||||
// Defines a view over a table's fields.
|
||||
enum View {
|
||||
// Uses the default view for each method as documented in its request.
|
||||
VIEW_UNSPECIFIED = 0;
|
||||
|
||||
// Only populates `name`.
|
||||
NAME_ONLY = 1;
|
||||
|
||||
// Only populates `name` and fields related to the table's schema.
|
||||
SCHEMA_VIEW = 2;
|
||||
|
||||
// Only populates `name` and fields related to the table's
|
||||
// replication state.
|
||||
REPLICATION_VIEW = 3;
|
||||
|
||||
// Populates all fields.
|
||||
FULL = 4;
|
||||
}
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the table. Values are of the form
|
||||
// `projects/<project>/instances/<instance>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`.
|
||||
// Views: `NAME_ONLY`, `SCHEMA_VIEW`, `REPLICATION_VIEW`, `FULL`
|
||||
string name = 1;
|
||||
|
||||
// (`OutputOnly`)
|
||||
// Map from cluster ID to per-cluster table state.
|
||||
// If it could not be determined whether or not the table has data in a
|
||||
// particular cluster (for example, if its zone is unavailable), then
|
||||
// there will be an entry for the cluster with `replication_state` set to `STATE_NOT_KNOWN`.
|
||||
// Views: `REPLICATION_VIEW`, `FULL`
|
||||
map<string, ClusterState> cluster_states = 2;
|
||||
|
||||
// (`CreationOnly`)
|
||||
// The column families configured for this table, mapped by column family ID.
|
||||
// Views: `SCHEMA_VIEW`, `FULL`
|
||||
map<string, ColumnFamily> column_families = 3;
|
||||
|
||||
// (`CreationOnly`)
|
||||
// The granularity (i.e. `MILLIS`) at which timestamps are stored in
|
||||
// this table. Timestamps not matching the granularity will be rejected.
|
||||
// If unspecified at creation time, the value will be set to `MILLIS`.
|
||||
// Views: `SCHEMA_VIEW`, `FULL`
|
||||
TimestampGranularity granularity = 4;
|
||||
}
|
||||
|
||||
// A set of columns within a table which share a common configuration.
|
||||
message ColumnFamily {
|
||||
// Garbage collection rule specified as a protobuf.
|
||||
// Must serialize to at most 500 bytes.
|
||||
//
|
||||
// NOTE: Garbage collection executes opportunistically in the background, and
|
||||
// so it's possible for reads to return a cell even if it matches the active
|
||||
// GC expression for its family.
|
||||
GcRule gc_rule = 1;
|
||||
}
|
||||
|
||||
// Rule for determining which cells to delete during garbage collection.
|
||||
message GcRule {
|
||||
// A GcRule which deletes cells matching all of the given rules.
|
||||
message Intersection {
|
||||
// Only delete cells which would be deleted by every element of `rules`.
|
||||
repeated GcRule rules = 1;
|
||||
}
|
||||
|
||||
// A GcRule which deletes cells matching any of the given rules.
|
||||
message Union {
|
||||
// Delete cells which would be deleted by any element of `rules`.
|
||||
repeated GcRule rules = 1;
|
||||
}
|
||||
|
||||
// Garbage collection rules.
|
||||
oneof rule {
|
||||
// Delete all cells in a column except the most recent N.
|
||||
int32 max_num_versions = 1;
|
||||
|
||||
// Delete cells in a column older than the given age.
|
||||
// Values must be at least one millisecond, and will be truncated to
|
||||
// microsecond granularity.
|
||||
google.protobuf.Duration max_age = 2;
|
||||
|
||||
// Delete cells that would be deleted by every nested rule.
|
||||
Intersection intersection = 3;
|
||||
|
||||
// Delete cells that would be deleted by any nested rule.
|
||||
Union union = 4;
|
||||
}
|
||||
}
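GcRules compose: `union` deletes a cell if any nested rule would, `intersection` only if every nested rule would. A hypothetical rule expressing "keep at most five versions, and nothing older than 30 days":

const gcRule = {
  union: {
    rules: [
      { max_num_versions: 5 },                      // drop anything beyond the newest 5
      { max_age: { seconds: 30 * 24 * 60 * 60 } },  // drop anything older than 30 days
    ],
  },
};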
|
||||
|
||||
// A snapshot of a table at a particular time. A snapshot can be used as a
|
||||
// checkpoint for data restoration or a data source for a new table.
|
||||
//
|
||||
// Note: This is a private alpha release of Cloud Bigtable snapshots. This
|
||||
// feature is not currently available to most Cloud Bigtable customers. This
|
||||
// feature might be changed in backward-incompatible ways and is not recommended
|
||||
// for production use. It is not subject to any SLA or deprecation policy.
|
||||
message Snapshot {
|
||||
// Possible states of a snapshot.
|
||||
enum State {
|
||||
// The state of the snapshot could not be determined.
|
||||
STATE_NOT_KNOWN = 0;
|
||||
|
||||
// The snapshot has been successfully created and can serve all requests.
|
||||
READY = 1;
|
||||
|
||||
// The snapshot is currently being created, and may be destroyed if the
|
||||
// creation process encounters an error. A snapshot may not be restored to a
|
||||
// table while it is being created.
|
||||
CREATING = 2;
|
||||
}
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The unique name of the snapshot.
|
||||
// Values are of the form
|
||||
// `projects/<project>/instances/<instance>/clusters/<cluster>/snapshots/<snapshot>`.
|
||||
string name = 1;
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The source table at the time the snapshot was taken.
|
||||
Table source_table = 2;
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The size of the data in the source table at the time the snapshot was
|
||||
// taken. In some cases, this value may be computed asynchronously via a
|
||||
// background process and a placeholder of 0 will be used in the meantime.
|
||||
int64 data_size_bytes = 3;
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The time when the snapshot is created.
|
||||
google.protobuf.Timestamp create_time = 4;
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The time when the snapshot will be deleted. The maximum amount of time a
|
||||
// snapshot can stay active is 365 days. If 'ttl' is not specified,
|
||||
// the default maximum of 365 days will be used.
|
||||
google.protobuf.Timestamp delete_time = 5;
|
||||
|
||||
// (`OutputOnly`)
|
||||
// The current state of the snapshot.
|
||||
State state = 6;
|
||||
|
||||
// (`OutputOnly`)
|
||||
// Description of the snapshot.
|
||||
string description = 7;
|
||||
}
|
516
express-server/node_modules/google-proto-files/google/bigtable/v1/bigtable_data.proto
generated
vendored
Normal file
516
express-server/node_modules/google-proto-files/google/bigtable/v1/bigtable_data.proto
generated
vendored
Normal file
@ -0,0 +1,516 @@
|
||||
// Copyright 2018 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.bigtable.v1;
|
||||
|
||||
option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
|
||||
option java_multiple_files = true;
|
||||
option java_outer_classname = "BigtableDataProto";
|
||||
option java_package = "com.google.bigtable.v1";
|
||||
|
||||
|
||||
// Specifies the complete (requested) contents of a single row of a table.
|
||||
// Rows which exceed 256MiB in size cannot be read in full.
|
||||
message Row {
|
||||
// The unique key which identifies this row within its table. This is the same
|
||||
// key that's used to identify the row in, for example, a MutateRowRequest.
|
||||
// May contain any non-empty byte string up to 4KiB in length.
|
||||
bytes key = 1;
|
||||
|
||||
// May be empty, but only if the entire row is empty.
|
||||
// The mutual ordering of column families is not specified.
|
||||
repeated Family families = 2;
|
||||
}
|
||||
|
||||
// Specifies (some of) the contents of a single row/column family of a table.
|
||||
message Family {
|
||||
// The unique key which identifies this family within its row. This is the
|
||||
// same key that's used to identify the family in, for example, a RowFilter
|
||||
// which sets its "family_name_regex_filter" field.
|
||||
// Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may
|
||||
// produce cells in a sentinel family with an empty name.
|
||||
// Must be no greater than 64 characters in length.
|
||||
string name = 1;
|
||||
|
||||
// Must not be empty. Sorted in order of increasing "qualifier".
|
||||
repeated Column columns = 2;
|
||||
}
|
||||
|
||||
// Specifies (some of) the contents of a single row/column of a table.
|
||||
message Column {
|
||||
// The unique key which identifies this column within its family. This is the
|
||||
// same key that's used to identify the column in, for example, a RowFilter
|
||||
// which sets its "column_qualifier_regex_filter" field.
|
||||
// May contain any byte string, including the empty string, up to 16kiB in
|
||||
// length.
|
||||
bytes qualifier = 1;
|
||||
|
||||
// Must not be empty. Sorted in order of decreasing "timestamp_micros".
|
||||
repeated Cell cells = 2;
|
||||
}
|
||||
|
||||
// Specifies (some of) the contents of a single row/column/timestamp of a table.
|
||||
message Cell {
|
||||
// The cell's stored timestamp, which also uniquely identifies it within
|
||||
// its column.
|
||||
// Values are always expressed in microseconds, but individual tables may set
|
||||
// a coarser "granularity" to further restrict the allowed values. For
|
||||
// example, a table which specifies millisecond granularity will only allow
|
||||
// values of "timestamp_micros" which are multiples of 1000.
|
||||
int64 timestamp_micros = 1;
|
||||
|
||||
// The value stored in the cell.
|
||||
// May contain any byte string, including the empty string, up to 100MiB in
|
||||
// length.
|
||||
bytes value = 2;
|
||||
|
||||
// Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter].
|
||||
repeated string labels = 3;
|
||||
}
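Putting the four messages above together, a fully materialized row nests Family → Column → Cell under a single key. A hypothetical example of that shape (the key, family, qualifier, and value are placeholders; int64 timestamps may surface as strings or Long objects depending on loader options):

const row = {
  key: Buffer.from('user#1234'),
  families: [
    {
      name: 'profile',
      columns: [
        {
          qualifier: Buffer.from('email'),
          cells: [
            {
              timestamp_micros: '1541000000000000', // microseconds, per the table's granularity
              value: Buffer.from('someone@example.com'),
              labels: [],
            },
          ],
        },
      ],
    },
  ],
};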
|
||||
|
||||
// Specifies a contiguous range of rows.
|
||||
message RowRange {
|
||||
// Inclusive lower bound. If left empty, interpreted as the empty string.
|
||||
bytes start_key = 2;
|
||||
|
||||
// Exclusive upper bound. If left empty, interpreted as infinity.
|
||||
bytes end_key = 3;
|
||||
}
|
||||
|
||||
// Specifies a non-contiguous set of rows.
|
||||
message RowSet {
|
||||
// Single rows included in the set.
|
||||
repeated bytes row_keys = 1;
|
||||
|
||||
// Contiguous row ranges included in the set.
|
||||
repeated RowRange row_ranges = 2;
|
||||
}
|
||||
|
||||
// Specifies a contiguous range of columns within a single column family.
|
||||
// The range spans from <column_family>:<start_qualifier> to
|
||||
// <column_family>:<end_qualifier>, where both bounds can be either inclusive or
|
||||
// exclusive.
|
||||
message ColumnRange {
|
||||
// The name of the column family within which this range falls.
|
||||
string family_name = 1;
|
||||
|
||||
// The column qualifier at which to start the range (within 'column_family').
|
||||
// If neither field is set, interpreted as the empty string, inclusive.
|
||||
oneof start_qualifier {
|
||||
// Used when giving an inclusive lower bound for the range.
|
||||
bytes start_qualifier_inclusive = 2;
|
||||
|
||||
// Used when giving an exclusive lower bound for the range.
|
||||
bytes start_qualifier_exclusive = 3;
|
||||
}
|
||||
|
||||
// The column qualifier at which to end the range (within 'column_family').
|
||||
// If neither field is set, interpreted as the infinite string, exclusive.
|
||||
oneof end_qualifier {
|
||||
// Used when giving an inclusive upper bound for the range.
|
||||
bytes end_qualifier_inclusive = 4;
|
||||
|
||||
// Used when giving an exclusive upper bound for the range.
|
||||
bytes end_qualifier_exclusive = 5;
|
||||
}
|
||||
}
|
||||
|
||||
// Specifies a contiguous range of microsecond timestamps.
|
||||
message TimestampRange {
|
||||
// Inclusive lower bound. If left empty, interpreted as 0.
|
||||
int64 start_timestamp_micros = 1;
|
||||
|
||||
// Exclusive upper bound. If left empty, interpreted as infinity.
|
||||
int64 end_timestamp_micros = 2;
|
||||
}
|
||||
|
||||
// Specifies a contiguous range of raw byte values.
|
||||
message ValueRange {
|
||||
// The value at which to start the range.
|
||||
// If neither field is set, interpreted as the empty string, inclusive.
|
||||
oneof start_value {
|
||||
// Used when giving an inclusive lower bound for the range.
|
||||
bytes start_value_inclusive = 1;
|
||||
|
||||
// Used when giving an exclusive lower bound for the range.
|
||||
bytes start_value_exclusive = 2;
|
||||
}
|
||||
|
||||
// The value at which to end the range.
|
||||
// If neither field is set, interpreted as the infinite string, exclusive.
|
||||
oneof end_value {
|
||||
// Used when giving an inclusive upper bound for the range.
|
||||
bytes end_value_inclusive = 3;
|
||||
|
||||
// Used when giving an exclusive upper bound for the range.
|
||||
bytes end_value_exclusive = 4;
|
||||
}
|
||||
}
|
||||
|
||||
// Takes a row as input and produces an alternate view of the row based on
|
||||
// specified rules. For example, a RowFilter might trim down a row to include
|
||||
// just the cells from columns matching a given regular expression, or might
|
||||
// return all the cells of a row but not their values. More complicated filters
|
||||
// can be composed out of these components to express requests such as, "within
|
||||
// every column of a particular family, give just the two most recent cells
|
||||
// which are older than timestamp X."
|
||||
//
|
||||
// There are two broad categories of RowFilters (true filters and transformers),
|
||||
// as well as two ways to compose simple filters into more complex ones
|
||||
// (chains and interleaves). They work as follows:
|
||||
//
|
||||
// * True filters alter the input row by excluding some of its cells wholesale
|
||||
// from the output row. An example of a true filter is the "value_regex_filter",
|
||||
// which excludes cells whose values don't match the specified pattern. All
|
||||
// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
|
||||
// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
|
||||
// important point to keep in mind is that RE2(.) is equivalent by default to
|
||||
// RE2([^\n]), meaning that it does not match newlines. When attempting to match
|
||||
// an arbitrary byte, you should therefore use the escape sequence '\C', which
|
||||
// may need to be further escaped as '\\C' in your client language.
|
||||
//
|
||||
// * Transformers alter the input row by changing the values of some of its
|
||||
// cells in the output, without excluding them completely. Currently, the only
|
||||
// supported transformer is the "strip_value_transformer", which replaces every
|
||||
// cell's value with the empty string.
|
||||
//
|
||||
// * Chains and interleaves are described in more detail in the
|
||||
// RowFilter.Chain and RowFilter.Interleave documentation.
|
||||
//
|
||||
// The total serialized size of a RowFilter message must not
|
||||
// exceed 4096 bytes, and RowFilters may not be nested within each other
|
||||
// (in Chains or Interleaves) to a depth of more than 20.
|
||||
message RowFilter {
|
||||
// A RowFilter which sends rows through several RowFilters in sequence.
|
||||
message Chain {
|
||||
// The elements of "filters" are chained together to process the input row:
|
||||
// in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row
|
||||
// The full chain is executed atomically.
|
||||
repeated RowFilter filters = 1;
|
||||
}
|
||||
|
||||
// A RowFilter which sends each row to each of several component
|
||||
// RowFilters and interleaves the results.
|
||||
message Interleave {
|
||||
// The elements of "filters" all process a copy of the input row, and the
|
||||
// results are pooled, sorted, and combined into a single output row.
|
||||
// If multiple cells are produced with the same column and timestamp,
|
||||
// they will all appear in the output row in an unspecified mutual order.
|
||||
// Consider the following example, with three filters:
|
||||
//
|
||||
// input row
|
||||
// |
|
||||
// -----------------------------------------------------
|
||||
// | | |
|
||||
// f(0) f(1) f(2)
|
||||
// | | |
|
||||
// 1: foo,bar,10,x foo,bar,10,z far,bar,7,a
|
||||
// 2: foo,blah,11,z far,blah,5,x far,blah,5,x
|
||||
// | | |
|
||||
// -----------------------------------------------------
|
||||
// |
|
||||
// 1: foo,bar,10,z // could have switched with #2
|
||||
// 2: foo,bar,10,x // could have switched with #1
|
||||
// 3: foo,blah,11,z
|
||||
// 4: far,bar,7,a
|
||||
// 5: far,blah,5,x // identical to #6
|
||||
// 6: far,blah,5,x // identical to #5
|
||||
// All interleaved filters are executed atomically.
|
||||
repeated RowFilter filters = 1;
|
||||
}
|
||||
|
||||
// A RowFilter which evaluates one of two possible RowFilters, depending on
|
||||
// whether or not a predicate RowFilter outputs any cells from the input row.
|
||||
//
|
||||
// IMPORTANT NOTE: The predicate filter does not execute atomically with the
|
||||
// true and false filters, which may lead to inconsistent or unexpected
|
||||
// results. Additionally, Condition filters have poor performance, especially
|
||||
// when filters are set for the false condition.
|
||||
message Condition {
|
||||
// If "predicate_filter" outputs any cells, then "true_filter" will be
|
||||
// evaluated on the input row. Otherwise, "false_filter" will be evaluated.
|
||||
RowFilter predicate_filter = 1;
|
||||
|
||||
// The filter to apply to the input row if "predicate_filter" returns any
|
||||
// results. If not provided, no results will be returned in the true case.
|
||||
RowFilter true_filter = 2;
|
||||
|
||||
// The filter to apply to the input row if "predicate_filter" does not
|
||||
// return any results. If not provided, no results will be returned in the
|
||||
// false case.
|
||||
RowFilter false_filter = 3;
|
||||
}
|
||||
|
||||
// Which of the possible RowFilter types to apply. If none are set, this
|
||||
// RowFilter returns all cells in the input row.
|
||||
oneof filter {
|
||||
// Applies several RowFilters to the data in sequence, progressively
|
||||
// narrowing the results.
|
||||
Chain chain = 1;
|
||||
|
||||
// Applies several RowFilters to the data in parallel and combines the
|
||||
// results.
|
||||
Interleave interleave = 2;
|
||||
|
||||
// Applies one of two possible RowFilters to the data based on the output of
|
||||
// a predicate RowFilter.
|
||||
Condition condition = 3;
|
||||
|
||||
// ADVANCED USE ONLY.
|
||||
// Hook for introspection into the RowFilter. Outputs all cells directly to
|
||||
// the output of the read rather than to any parent filter. Consider the
|
||||
// following example:
|
||||
//
|
||||
// Chain(
|
||||
// FamilyRegex("A"),
|
||||
// Interleave(
|
||||
// All(),
|
||||
// Chain(Label("foo"), Sink())
|
||||
// ),
|
||||
// QualifierRegex("B")
|
||||
// )
|
||||
//
|
||||
// A,A,1,w
|
||||
// A,B,2,x
|
||||
// B,B,4,z
|
||||
// |
|
||||
// FamilyRegex("A")
|
||||
// |
|
||||
// A,A,1,w
|
||||
    //                                     A,B,2,x
    //                                        |
    //                          +------------+-------------+
    //                          |                          |
    //                        All()                    Label(foo)
    //                          |                          |
    //                       A,A,1,w              A,A,1,w,labels:[foo]
    //                       A,B,2,x              A,B,2,x,labels:[foo]
    //                          |                          |
    //                          |                        Sink() --------------+
    //                          |                          |                  |
    //                          +------------+      x------+       A,A,1,w,labels:[foo]
    //                                       |                     A,B,2,x,labels:[foo]
    //                                    A,A,1,w                            |
    //                                    A,B,2,x                            |
    //                                       |                               |
    //                              QualifierRegex("B")                      |
    //                                       |                               |
    //                                    A,B,2,x                            |
    //                                       |                               |
    //                                       +-------------------------------+
    //                                       |
    //                        A,A,1,w,labels:[foo]
    //                        A,B,2,x,labels:[foo]  // could be switched
    //                        A,B,2,x               // could be switched
    //
    // Despite being excluded by the qualifier filter, a copy of every cell
    // that reaches the sink is present in the final result.
    //
    // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave],
    // duplicate cells are possible, and appear in an unspecified mutual order.
    // In this case we have a duplicate with column "A:B" and timestamp 2,
    // because one copy passed through the all filter while the other was
    // passed through the label and sink. Note that one copy has label "foo",
    // while the other does not.
    //
    // Cannot be used within the `predicate_filter`, `true_filter`, or
    // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition].
    bool sink = 16;

    // Matches all cells, regardless of input. Functionally equivalent to
    // leaving `filter` unset, but included for completeness.
    bool pass_all_filter = 17;

    // Does not match any cells, regardless of input. Useful for temporarily
    // disabling just part of a filter.
    bool block_all_filter = 18;

    // Matches only cells from rows whose keys satisfy the given RE2 regex. In
    // other words, passes through the entire row when the key matches, and
    // otherwise produces an empty row.
    // Note that, since row keys can contain arbitrary bytes, the '\C' escape
    // sequence must be used if a true wildcard is desired. The '.' character
    // will not match the new line character '\n', which may be present in a
    // binary key.
    bytes row_key_regex_filter = 4;

    // Matches all cells from a row with probability p, and matches no cells
    // from the row with probability 1-p.
    double row_sample_filter = 14;

    // Matches only cells from columns whose families satisfy the given RE2
    // regex. For technical reasons, the regex must not contain the ':'
    // character, even if it is not being used as a literal.
    // Note that, since column families cannot contain the new line character
    // '\n', it is sufficient to use '.' as a full wildcard when matching
    // column family names.
    string family_name_regex_filter = 5;

    // Matches only cells from columns whose qualifiers satisfy the given RE2
    // regex.
    // Note that, since column qualifiers can contain arbitrary bytes, the '\C'
    // escape sequence must be used if a true wildcard is desired. The '.'
    // character will not match the new line character '\n', which may be
    // present in a binary qualifier.
    bytes column_qualifier_regex_filter = 6;

    // Matches only cells from columns within the given range.
    ColumnRange column_range_filter = 7;

    // Matches only cells with timestamps within the given range.
    TimestampRange timestamp_range_filter = 8;

    // Matches only cells with values that satisfy the given regular expression.
    // Note that, since cell values can contain arbitrary bytes, the '\C' escape
    // sequence must be used if a true wildcard is desired. The '.' character
    // will not match the new line character '\n', which may be present in a
    // binary value.
    bytes value_regex_filter = 9;

    // Matches only cells with values that fall within the given range.
    ValueRange value_range_filter = 15;

    // Skips the first N cells of each row, matching all subsequent cells.
    // If duplicate cells are present, as is possible when using an Interleave,
    // each copy of the cell is counted separately.
    int32 cells_per_row_offset_filter = 10;

    // Matches only the first N cells of each row.
    // If duplicate cells are present, as is possible when using an Interleave,
    // each copy of the cell is counted separately.
    int32 cells_per_row_limit_filter = 11;

    // Matches only the most recent N cells within each column. For example,
    // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9,
    // skip all earlier cells in "foo:bar", and then begin matching again in
    // column "foo:bar2".
    // If duplicate cells are present, as is possible when using an Interleave,
    // each copy of the cell is counted separately.
    int32 cells_per_column_limit_filter = 12;

    // Replaces each cell's value with the empty string.
    bool strip_value_transformer = 13;

    // Applies the given label to all cells in the output row. This allows
    // the client to determine which results were produced from which part of
    // the filter.
    //
    // Values must be at most 15 characters in length, and match the RE2
    // pattern [a-z0-9\\-]+
    //
    // Due to a technical limitation, it is not currently possible to apply
    // multiple labels to a cell. As a result, a Chain may have no more than
    // one sub-filter which contains an apply_label_transformer. It is okay for
    // an Interleave to contain multiple apply_label_transformers, as they will
    // be applied to separate copies of the input. This may be relaxed in the
    // future.
    string apply_label_transformer = 19;
  }
}

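Editor's note: a minimal sketch of how the filter fields above are typically composed from client code. It assumes the google-cloud-bigtable Python helper module (which speaks the v2 wire format but exposes the same filter concepts); the family/qualifier names are placeholders, not anything defined in this repository.

from google.cloud.bigtable import row_filters

# Chain = family regex -> qualifier regex -> keep 2 newest versions -> strip values.
flt = row_filters.RowFilterChain(filters=[
    row_filters.FamilyNameRegexFilter("stats"),
    row_filters.ColumnQualifierRegexFilter(b"daily.*"),
    row_filters.CellsColumnLimitFilter(2),
    row_filters.StripValueTransformerFilter(True),
])
# row = table.read_row(b"user#123", filter_=flt)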
// Specifies a particular change to be made to the contents of a row.
message Mutation {
  // A Mutation which sets the value of the specified cell.
  message SetCell {
    // The name of the family into which new data should be written.
    // Must match [-_.a-zA-Z0-9]+
    string family_name = 1;

    // The qualifier of the column into which new data should be written.
    // Can be any byte string, including the empty string.
    bytes column_qualifier = 2;

    // The timestamp of the cell into which new data should be written.
    // Use -1 for current Bigtable server time.
    // Otherwise, the client should set this value itself, noting that the
    // default value is a timestamp of zero if the field is left unspecified.
    // Values must match the "granularity" of the table (e.g. micros, millis).
    int64 timestamp_micros = 3;

    // The value to be written into the specified cell.
    bytes value = 4;
  }

  // A Mutation which deletes cells from the specified column, optionally
  // restricting the deletions to a given timestamp range.
  message DeleteFromColumn {
    // The name of the family from which cells should be deleted.
    // Must match [-_.a-zA-Z0-9]+
    string family_name = 1;

    // The qualifier of the column from which cells should be deleted.
    // Can be any byte string, including the empty string.
    bytes column_qualifier = 2;

    // The range of timestamps within which cells should be deleted.
    TimestampRange time_range = 3;
  }

  // A Mutation which deletes all cells from the specified column family.
  message DeleteFromFamily {
    // The name of the family from which cells should be deleted.
    // Must match [-_.a-zA-Z0-9]+
    string family_name = 1;
  }

  // A Mutation which deletes all cells from the containing row.
  message DeleteFromRow {

  }

  // Which of the possible Mutation types to apply.
  oneof mutation {
    // Set a cell's value.
    SetCell set_cell = 1;

    // Deletes cells from a column.
    DeleteFromColumn delete_from_column = 2;

    // Deletes cells from a column family.
    DeleteFromFamily delete_from_family = 3;

    // Deletes cells from the entire row.
    DeleteFromRow delete_from_row = 4;
  }
}

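Editor's note: a hedged sketch of how SetCell / DeleteFromColumn mutations are issued through the Python client's DirectRow wrapper; the project, instance, table, and column names are assumed placeholders.

import datetime
from google.cloud import bigtable

client = bigtable.Client(project="my-project")          # assumed project id
table = client.instance("my-instance").table("my-table") # assumed instance/table ids

row = table.direct_row(b"user#123")
row.set_cell("stats", b"daily_total", b"42",
             timestamp=datetime.datetime.now(datetime.timezone.utc))  # SetCell
row.delete_cell("stats", b"obsolete_col")                             # DeleteFromColumn
row.commit()   # sends one atomic MutateRow with both entries, applied in order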
// Specifies an atomic read/modify/write operation on the latest value of the
// specified column.
message ReadModifyWriteRule {
  // The name of the family to which the read/modify/write should be applied.
  // Must match [-_.a-zA-Z0-9]+
  string family_name = 1;

  // The qualifier of the column to which the read/modify/write should be
  // applied.
  // Can be any byte string, including the empty string.
  bytes column_qualifier = 2;

  // The rule used to determine the column's new latest value from its current
  // latest value.
  oneof rule {
    // Rule specifying that "append_value" be appended to the existing value.
    // If the targeted cell is unset, it will be treated as containing the
    // empty string.
    bytes append_value = 3;

    // Rule specifying that "increment_amount" be added to the existing value.
    // If the targeted cell is unset, it will be treated as containing a zero.
    // Otherwise, the targeted cell must contain an 8-byte value (interpreted
    // as a 64-bit big-endian signed integer), or the entire request will fail.
    int64 increment_amount = 4;
  }
}
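Editor's note: a short sketch of the two rules above via the client library's AppendRow wrapper (reusing the `table` object from the earlier sketch).

row = table.append_row(b"counter#row")           # wraps ReadModifyWriteRow
row.append_cell_value("stats", b"log", b";evt")  # append_value rule
row.increment_cell_value("stats", b"hits", 1)    # increment_amount rule (8-byte big-endian)
new_cells = row.commit()                         # returns the updated cell contents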
91
express-server/node_modules/google-proto-files/google/bigtable/v1/bigtable_service.proto
generated
vendored
Normal file
@@ -0,0 +1,91 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.v1;

import "google/api/annotations.proto";
import "google/bigtable/v1/bigtable_data.proto";
import "google/bigtable/v1/bigtable_service_messages.proto";
import "google/protobuf/empty.proto";

option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
option java_generic_services = true;
option java_multiple_files = true;
option java_outer_classname = "BigtableServicesProto";
option java_package = "com.google.bigtable.v1";


// Service for reading from and writing to existing Bigtables.
service BigtableService {
  // Streams back the contents of all requested rows, optionally applying
  // the same Reader filter to each. Depending on their size, rows may be
  // broken up across multiple responses, but atomicity of each row will still
  // be preserved.
  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read"
      body: "*"
    };
  }

  // Returns a sample of row keys in the table. The returned row keys will
  // delimit contiguous sections of the table of approximately equal size,
  // which can be used to break up the data for distributed tasks like
  // mapreduces.
  rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
    option (google.api.http) = {
      get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys"
    };
  }

  // Mutates a row atomically. Cells already present in the row are left
  // unchanged unless explicitly changed by 'mutation'.
  rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate"
      body: "*"
    };
  }

  // Mutates multiple rows in a batch. Each individual row is mutated
  // atomically as in MutateRow, but the entire batch is not executed
  // atomically.
  rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows"
      body: "*"
    };
  }

  // Mutates a row atomically based on the output of a predicate Reader filter.
  rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate"
      body: "*"
    };
  }

  // Modifies a row atomically, reading the latest existing timestamp/value from
  // the specified columns and writing a new value at
  // max(existing timestamp, current server time) based on pre-defined
  // read/modify/write rules. Returns the new contents of all modified cells.
  rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) {
    option (google.api.http) = {
      post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite"
      body: "*"
    };
  }
}
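Editor's note: a hedged sketch of the two read paths (ReadRows and SampleRowKeys) using the client library, which speaks the newer wire format but has the same semantics; identifiers continue the earlier placeholder sketch.

partial = table.read_rows(start_key=b"user#", end_key=b"user#~", limit=100,
                          filter_=row_filters.CellsColumnLimitFilter(1))
for row_data in partial:                  # rows stream back in key order
    print(row_data.row_key, list(row_data.cells))

for sample in table.sample_row_keys():    # split points for MapReduce-style work
    print(sample.row_key, sample.offset_bytes)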
218
express-server/node_modules/google-proto-files/google/bigtable/v1/bigtable_service_messages.proto
generated
vendored
Normal file
@@ -0,0 +1,218 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.v1;

import "google/bigtable/v1/bigtable_data.proto";
import "google/rpc/status.proto";

option go_package = "google.golang.org/genproto/googleapis/bigtable/v1;bigtable";
option java_multiple_files = true;
option java_outer_classname = "BigtableServiceMessagesProto";
option java_package = "com.google.bigtable.v1";


// Request message for BigtableService.ReadRows.
message ReadRowsRequest {
  // The unique name of the table from which to read.
  string table_name = 1;

  // If neither row_key nor row_range is set, reads from all rows.
  oneof target {
    // The key of a single row from which to read.
    bytes row_key = 2;

    // A range of rows from which to read.
    RowRange row_range = 3;

    // A set of rows from which to read. Entries need not be in order, and will
    // be deduplicated before reading.
    // The total serialized size of the set must not exceed 1MB.
    RowSet row_set = 8;
  }

  // The filter to apply to the contents of the specified row(s). If unset,
  // reads the entire table.
  RowFilter filter = 5;

  // By default, rows are read sequentially, producing results which are
  // guaranteed to arrive in increasing row order. Setting
  // "allow_row_interleaving" to true allows multiple rows to be interleaved in
  // the response stream, which increases throughput but breaks this guarantee,
  // and may force the client to use more memory to buffer partially-received
  // rows. Cannot be set to true when specifying "num_rows_limit".
  bool allow_row_interleaving = 6;

  // The read will terminate after committing to N rows' worth of results. The
  // default (zero) is to return all results.
  // Note that "allow_row_interleaving" cannot be set to true when this is set.
  int64 num_rows_limit = 7;
}

// Response message for BigtableService.ReadRows.
message ReadRowsResponse {
  // Specifies a piece of a row's contents returned as part of the read
  // response stream.
  message Chunk {
    oneof chunk {
      // A subset of the data from a particular row. As long as no "reset_row"
      // is received in between, multiple "row_contents" from the same row are
      // from the same atomic view of that row, and will be received in the
      // expected family/column/timestamp order.
      Family row_contents = 1;

      // Indicates that the client should drop all previous chunks for
      // "row_key", as it will be re-read from the beginning.
      bool reset_row = 2;

      // Indicates that the client can safely process all previous chunks for
      // "row_key", as its data has been fully read.
      bool commit_row = 3;
    }
  }

  // The key of the row for which we're receiving data.
  // Results will be received in increasing row key order, unless
  // "allow_row_interleaving" was specified in the request.
  bytes row_key = 1;

  // One or more chunks of the row specified by "row_key".
  repeated Chunk chunks = 2;
}

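Editor's note: a simplified, message-level sketch of the buffering contract described above; `responses` is assumed to be the ReadRowsResponse stream for a single-row request.

def collect_row(responses):
    buffered = []
    for rsp in responses:
        for chunk in rsp.chunks:
            if chunk.reset_row:            # discard everything seen so far for this row
                buffered = []
            elif chunk.commit_row:         # row is complete and safe to process
                return rsp.row_key, buffered
            else:
                buffered.append(chunk.row_contents)
    return None                            # stream ended without a commit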
// Request message for BigtableService.SampleRowKeys.
message SampleRowKeysRequest {
  // The unique name of the table from which to sample row keys.
  string table_name = 1;
}

// Response message for BigtableService.SampleRowKeys.
message SampleRowKeysResponse {
  // Sorted streamed sequence of sample row keys in the table. The table might
  // have contents before the first row key in the list and after the last one,
  // but a key containing the empty string indicates "end of table" and will be
  // the last response given, if present.
  // Note that row keys in this list may not have ever been written to or read
  // from, and users should therefore not make any assumptions about the row key
  // structure that are specific to their use case.
  bytes row_key = 1;

  // Approximate total storage space used by all rows in the table which precede
  // "row_key". Buffering the contents of all rows between two subsequent
  // samples would require space roughly equal to the difference in their
  // "offset_bytes" fields.
  int64 offset_bytes = 2;
}

// Request message for BigtableService.MutateRow.
message MutateRowRequest {
  // The unique name of the table to which the mutation should be applied.
  string table_name = 1;

  // The key of the row to which the mutation should be applied.
  bytes row_key = 2;

  // Changes to be atomically applied to the specified row. Entries are applied
  // in order, meaning that earlier mutations can be masked by later ones.
  // Must contain at least one entry and at most 100000.
  repeated Mutation mutations = 3;
}

// Request message for BigtableService.MutateRows.
message MutateRowsRequest {
  message Entry {
    // The key of the row to which the `mutations` should be applied.
    bytes row_key = 1;

    // Changes to be atomically applied to the specified row. Mutations are
    // applied in order, meaning that earlier mutations can be masked by
    // later ones.
    // At least one mutation must be specified.
    repeated Mutation mutations = 2;
  }

  // The unique name of the table to which the mutations should be applied.
  string table_name = 1;

  // The row keys/mutations to be applied in bulk.
  // Each entry is applied as an atomic mutation, but the entries may be
  // applied in arbitrary order (even between entries for the same row).
  // At least one entry must be specified, and in total the entries may
  // contain at most 100000 mutations.
  repeated Entry entries = 2;
}

// Response message for BigtableService.MutateRows.
message MutateRowsResponse {
  // The results for each Entry from the request, presented in the order
  // in which the entries were originally given.
  // Depending on how requests are batched during execution, it is possible
  // for one Entry to fail due to an error with another Entry. In the event
  // that this occurs, the same error will be reported for both entries.
  repeated google.rpc.Status statuses = 1;
}

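Editor's note: a hedged sketch of a bulk mutation with per-entry status checking, continuing the earlier client-library placeholders.

rows = []
for key in (b"r1", b"r2", b"r3"):
    r = table.direct_row(key)
    r.set_cell("stats", b"flag", b"1")
    rows.append(r)

statuses = table.mutate_rows(rows)   # one status per entry, in request order
failed = [rows[i].row_key for i, s in enumerate(statuses) if s.code != 0]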
// Request message for BigtableService.CheckAndMutateRowRequest
message CheckAndMutateRowRequest {
  // The unique name of the table to which the conditional mutation should be
  // applied.
  string table_name = 1;

  // The key of the row to which the conditional mutation should be applied.
  bytes row_key = 2;

  // The filter to be applied to the contents of the specified row. Depending
  // on whether or not any results are yielded, either "true_mutations" or
  // "false_mutations" will be executed. If unset, checks that the row contains
  // any values at all.
  RowFilter predicate_filter = 6;

  // Changes to be atomically applied to the specified row if "predicate_filter"
  // yields at least one cell when applied to "row_key". Entries are applied in
  // order, meaning that earlier mutations can be masked by later ones.
  // Must contain at least one entry if "false_mutations" is empty, and at most
  // 100000.
  repeated Mutation true_mutations = 4;

  // Changes to be atomically applied to the specified row if "predicate_filter"
  // does not yield any cells when applied to "row_key". Entries are applied in
  // order, meaning that earlier mutations can be masked by later ones.
  // Must contain at least one entry if "true_mutations" is empty, and at most
  // 100000.
  repeated Mutation false_mutations = 5;
}

// Response message for BigtableService.CheckAndMutateRowRequest.
message CheckAndMutateRowResponse {
  // Whether or not the request's "predicate_filter" yielded any results for
  // the specified row.
  bool predicate_matched = 1;
}

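Editor's note: a hedged sketch of a conditional mutation via the client's ConditionalRow wrapper (placeholders as before); true- and false-state mutations correspond to true_mutations / false_mutations above.

cond = table.conditional_row(
    b"user#123",
    filter_=row_filters.ValueRegexFilter(b"active"))
cond.set_cell("stats", b"seen", b"1", state=True)   # applied if the predicate matched
cond.delete_cell("stats", b"seen", state=False)     # applied otherwise
predicate_matched = cond.commit()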
// Request message for BigtableService.ReadModifyWriteRowRequest.
message ReadModifyWriteRowRequest {
  // The unique name of the table to which the read/modify/write rules should be
  // applied.
  string table_name = 1;

  // The key of the row to which the read/modify/write rules should be applied.
  bytes row_key = 2;

  // Rules specifying how the specified row's contents are to be transformed
  // into writes. Entries are applied in order, meaning that earlier rules will
  // affect the results of later ones.
  repeated ReadModifyWriteRule rules = 3;
}
365
express-server/node_modules/google-proto-files/google/bigtable/v2/bigtable.proto
generated
vendored
Normal file
@@ -0,0 +1,365 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.v2;

import "google/api/annotations.proto";
import "google/bigtable/v2/data.proto";
import "google/protobuf/wrappers.proto";
import "google/rpc/status.proto";

option csharp_namespace = "Google.Cloud.Bigtable.V2";
option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable";
option java_multiple_files = true;
option java_outer_classname = "BigtableProto";
option java_package = "com.google.bigtable.v2";
option php_namespace = "Google\\Cloud\\Bigtable\\V2";


// Service for reading from and writing to existing Bigtable tables.
service Bigtable {
  // Streams back the contents of all requested rows in key order, optionally
  // applying the same Reader filter to each. Depending on their size,
  // rows and cells may be broken up across multiple responses, but
  // atomicity of each row will still be preserved. See the
  // ReadRowsResponse documentation for details.
  rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
    option (google.api.http) = {
      post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows"
      body: "*"
    };
  }

  // Returns a sample of row keys in the table. The returned row keys will
  // delimit contiguous sections of the table of approximately equal size,
  // which can be used to break up the data for distributed tasks like
  // mapreduces.
  rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
    option (google.api.http) = {
      get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys"
    };
  }

  // Mutates a row atomically. Cells already present in the row are left
  // unchanged unless explicitly changed by `mutation`.
  rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) {
    option (google.api.http) = {
      post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow"
      body: "*"
    };
  }

  // Mutates multiple rows in a batch. Each individual row is mutated
  // atomically as in MutateRow, but the entire batch is not executed
  // atomically.
  rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) {
    option (google.api.http) = {
      post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows"
      body: "*"
    };
  }

  // Mutates a row atomically based on the output of a predicate Reader filter.
  rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
    option (google.api.http) = {
      post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow"
      body: "*"
    };
  }

  // Modifies a row atomically on the server. The method reads the latest
  // existing timestamp and value from the specified columns and writes a new
  // entry based on pre-defined read/modify/write rules. The new value for the
  // timestamp is the greater of the existing timestamp or the current server
  // time. The method returns the new contents of all modified cells.
  rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) {
    option (google.api.http) = {
      post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow"
      body: "*"
    };
  }
}

// Request message for Bigtable.ReadRows.
message ReadRowsRequest {
  // The unique name of the table from which to read.
  // Values are of the form
  // `projects/<project>/instances/<instance>/tables/<table>`.
  string table_name = 1;

  // This value specifies routing for replication. If not specified, the
  // "default" application profile will be used.
  string app_profile_id = 5;

  // The row keys and/or ranges to read. If not specified, reads from all rows.
  RowSet rows = 2;

  // The filter to apply to the contents of the specified row(s). If unset,
  // reads the entirety of each row.
  RowFilter filter = 3;

  // The read will terminate after committing to N rows' worth of results. The
  // default (zero) is to return all results.
  int64 rows_limit = 4;
}

// Response message for Bigtable.ReadRows.
message ReadRowsResponse {
  // Specifies a piece of a row's contents returned as part of the read
  // response stream.
  message CellChunk {
    // The row key for this chunk of data. If the row key is empty,
    // this CellChunk is a continuation of the same row as the previous
    // CellChunk in the response stream, even if that CellChunk was in a
    // previous ReadRowsResponse message.
    bytes row_key = 1;

    // The column family name for this chunk of data. If this message
    // is not present this CellChunk is a continuation of the same column
    // family as the previous CellChunk. The empty string can occur as a
    // column family name in a response so clients must check
    // explicitly for the presence of this message, not just for
    // `family_name.value` being non-empty.
    google.protobuf.StringValue family_name = 2;

    // The column qualifier for this chunk of data. If this message
    // is not present, this CellChunk is a continuation of the same column
    // as the previous CellChunk. Column qualifiers may be empty so
    // clients must check for the presence of this message, not just
    // for `qualifier.value` being non-empty.
    google.protobuf.BytesValue qualifier = 3;

    // The cell's stored timestamp, which also uniquely identifies it
    // within its column. Values are always expressed in
    // microseconds, but individual tables may set a coarser
    // granularity to further restrict the allowed values. For
    // example, a table which specifies millisecond granularity will
    // only allow values of `timestamp_micros` which are multiples of
    // 1000. Timestamps are only set in the first CellChunk per cell
    // (for cells split into multiple chunks).
    int64 timestamp_micros = 4;

    // Labels applied to the cell by a
    // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set
    // on the first CellChunk per cell.
    repeated string labels = 5;

    // The value stored in the cell. Cell values can be split across
    // multiple CellChunks. In that case only the value field will be
    // set in CellChunks after the first: the timestamp and labels
    // will only be present in the first CellChunk, even if the first
    // CellChunk came in a previous ReadRowsResponse.
    bytes value = 6;

    // If this CellChunk is part of a chunked cell value and this is
    // not the final chunk of that cell, value_size will be set to the
    // total length of the cell value. The client can use this size
    // to pre-allocate memory to hold the full cell value.
    int32 value_size = 7;

    oneof row_status {
      // Indicates that the client should drop all previous chunks for
      // `row_key`, as it will be re-read from the beginning.
      bool reset_row = 8;

      // Indicates that the client can safely process all previous chunks for
      // `row_key`, as its data has been fully read.
      bool commit_row = 9;
    }
  }

  repeated CellChunk chunks = 1;

  // Optionally the server might return the row key of the last row it
  // has scanned. The client can use this to construct a more
  // efficient retry request if needed: any row keys or portions of
  // ranges less than this row key can be dropped from the request.
  // This is primarily useful for cases where the server has read a
  // lot of data that was filtered out since the last committed row
  // key, allowing the client to skip that work on a retry.
  bytes last_scanned_row_key = 2;
}

// Request message for Bigtable.SampleRowKeys.
message SampleRowKeysRequest {
  // The unique name of the table from which to sample row keys.
  // Values are of the form
  // `projects/<project>/instances/<instance>/tables/<table>`.
  string table_name = 1;

  // This value specifies routing for replication. If not specified, the
  // "default" application profile will be used.
  string app_profile_id = 2;
}

// Response message for Bigtable.SampleRowKeys.
message SampleRowKeysResponse {
  // Sorted streamed sequence of sample row keys in the table. The table might
  // have contents before the first row key in the list and after the last one,
  // but a key containing the empty string indicates "end of table" and will be
  // the last response given, if present.
  // Note that row keys in this list may not have ever been written to or read
  // from, and users should therefore not make any assumptions about the row key
  // structure that are specific to their use case.
  bytes row_key = 1;

  // Approximate total storage space used by all rows in the table which precede
  // `row_key`. Buffering the contents of all rows between two subsequent
  // samples would require space roughly equal to the difference in their
  // `offset_bytes` fields.
  int64 offset_bytes = 2;
}

// Request message for Bigtable.MutateRow.
message MutateRowRequest {
  // The unique name of the table to which the mutation should be applied.
  // Values are of the form
  // `projects/<project>/instances/<instance>/tables/<table>`.
  string table_name = 1;

  // This value specifies routing for replication. If not specified, the
  // "default" application profile will be used.
  string app_profile_id = 4;

  // The key of the row to which the mutation should be applied.
  bytes row_key = 2;

  // Changes to be atomically applied to the specified row. Entries are applied
  // in order, meaning that earlier mutations can be masked by later ones.
  // Must contain at least one entry and at most 100000.
  repeated Mutation mutations = 3;
}

// Response message for Bigtable.MutateRow.
message MutateRowResponse {

}

// Request message for Bigtable.MutateRows.
message MutateRowsRequest {
  message Entry {
    // The key of the row to which the `mutations` should be applied.
    bytes row_key = 1;

    // Changes to be atomically applied to the specified row. Mutations are
    // applied in order, meaning that earlier mutations can be masked by
    // later ones.
    // You must specify at least one mutation.
    repeated Mutation mutations = 2;
  }

  // The unique name of the table to which the mutations should be applied.
  string table_name = 1;

  // This value specifies routing for replication. If not specified, the
  // "default" application profile will be used.
  string app_profile_id = 3;

  // The row keys and corresponding mutations to be applied in bulk.
  // Each entry is applied as an atomic mutation, but the entries may be
  // applied in arbitrary order (even between entries for the same row).
  // At least one entry must be specified, and in total the entries can
  // contain at most 100000 mutations.
  repeated Entry entries = 2;
}

// Response message for Bigtable.MutateRows.
message MutateRowsResponse {
  message Entry {
    // The index into the original request's `entries` list of the Entry
    // for which a result is being reported.
    int64 index = 1;

    // The result of the request Entry identified by `index`.
    // Depending on how requests are batched during execution, it is possible
    // for one Entry to fail due to an error with another Entry. In the event
    // that this occurs, the same error will be reported for both entries.
    google.rpc.Status status = 2;
  }

  // One or more results for Entries from the batch request.
  repeated Entry entries = 1;
}

// Request message for Bigtable.CheckAndMutateRow.
message CheckAndMutateRowRequest {
  // The unique name of the table to which the conditional mutation should be
  // applied.
  // Values are of the form
  // `projects/<project>/instances/<instance>/tables/<table>`.
  string table_name = 1;

  // This value specifies routing for replication. If not specified, the
  // "default" application profile will be used.
  string app_profile_id = 7;

  // The key of the row to which the conditional mutation should be applied.
  bytes row_key = 2;

  // The filter to be applied to the contents of the specified row. Depending
  // on whether or not any results are yielded, either `true_mutations` or
  // `false_mutations` will be executed. If unset, checks that the row contains
  // any values at all.
  RowFilter predicate_filter = 6;

  // Changes to be atomically applied to the specified row if `predicate_filter`
  // yields at least one cell when applied to `row_key`. Entries are applied in
  // order, meaning that earlier mutations can be masked by later ones.
  // Must contain at least one entry if `false_mutations` is empty, and at most
  // 100000.
  repeated Mutation true_mutations = 4;

  // Changes to be atomically applied to the specified row if `predicate_filter`
  // does not yield any cells when applied to `row_key`. Entries are applied in
  // order, meaning that earlier mutations can be masked by later ones.
  // Must contain at least one entry if `true_mutations` is empty, and at most
  // 100000.
  repeated Mutation false_mutations = 5;
}

// Response message for Bigtable.CheckAndMutateRow.
message CheckAndMutateRowResponse {
  // Whether or not the request's `predicate_filter` yielded any results for
  // the specified row.
  bool predicate_matched = 1;
}

// Request message for Bigtable.ReadModifyWriteRow.
message ReadModifyWriteRowRequest {
  // The unique name of the table to which the read/modify/write rules should be
  // applied.
  // Values are of the form
  // `projects/<project>/instances/<instance>/tables/<table>`.
  string table_name = 1;

  // This value specifies routing for replication. If not specified, the
  // "default" application profile will be used.
  string app_profile_id = 4;

  // The key of the row to which the read/modify/write rules should be applied.
  bytes row_key = 2;

  // Rules specifying how the specified row's contents are to be transformed
  // into writes. Entries are applied in order, meaning that earlier rules will
  // affect the results of later ones.
  repeated ReadModifyWriteRule rules = 3;
}

// Response message for Bigtable.ReadModifyWriteRow.
message ReadModifyWriteRowResponse {
  // A Row containing the new contents of all cells modified by the request.
  Row row = 1;
}
535
express-server/node_modules/google-proto-files/google/bigtable/v2/data.proto
generated
vendored
Normal file
@@ -0,0 +1,535 @@
// Copyright 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.v2;

option csharp_namespace = "Google.Cloud.Bigtable.V2";
option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable";
option java_multiple_files = true;
option java_outer_classname = "DataProto";
option java_package = "com.google.bigtable.v2";
option php_namespace = "Google\\Cloud\\Bigtable\\V2";


// Specifies the complete (requested) contents of a single row of a table.
// Rows which exceed 256MiB in size cannot be read in full.
message Row {
  // The unique key which identifies this row within its table. This is the same
  // key that's used to identify the row in, for example, a MutateRowRequest.
  // May contain any non-empty byte string up to 4KiB in length.
  bytes key = 1;

  // May be empty, but only if the entire row is empty.
  // The mutual ordering of column families is not specified.
  repeated Family families = 2;
}

// Specifies (some of) the contents of a single row/column family intersection
// of a table.
message Family {
  // The unique key which identifies this family within its row. This is the
  // same key that's used to identify the family in, for example, a RowFilter
  // which sets its "family_name_regex_filter" field.
  // Must match `[-_.a-zA-Z0-9]+`, except that AggregatingRowProcessors may
  // produce cells in a sentinel family with an empty name.
  // Must be no greater than 64 characters in length.
  string name = 1;

  // Must not be empty. Sorted in order of increasing "qualifier".
  repeated Column columns = 2;
}

// Specifies (some of) the contents of a single row/column intersection of a
// table.
message Column {
  // The unique key which identifies this column within its family. This is the
  // same key that's used to identify the column in, for example, a RowFilter
  // which sets its `column_qualifier_regex_filter` field.
  // May contain any byte string, including the empty string, up to 16kiB in
  // length.
  bytes qualifier = 1;

  // Must not be empty. Sorted in order of decreasing "timestamp_micros".
  repeated Cell cells = 2;
}

// Specifies (some of) the contents of a single row/column/timestamp of a table.
message Cell {
  // The cell's stored timestamp, which also uniquely identifies it within
  // its column.
  // Values are always expressed in microseconds, but individual tables may set
  // a coarser granularity to further restrict the allowed values. For
  // example, a table which specifies millisecond granularity will only allow
  // values of `timestamp_micros` which are multiples of 1000.
  int64 timestamp_micros = 1;

  // The value stored in the cell.
  // May contain any byte string, including the empty string, up to 100MiB in
  // length.
  bytes value = 2;

  // Labels applied to the cell by a [RowFilter][google.bigtable.v2.RowFilter].
  repeated string labels = 3;
}

// Specifies a contiguous range of rows.
message RowRange {
  // The row key at which to start the range.
  // If neither field is set, interpreted as the empty string, inclusive.
  oneof start_key {
    // Used when giving an inclusive lower bound for the range.
    bytes start_key_closed = 1;

    // Used when giving an exclusive lower bound for the range.
    bytes start_key_open = 2;
  }

  // The row key at which to end the range.
  // If neither field is set, interpreted as the infinite row key, exclusive.
  oneof end_key {
    // Used when giving an exclusive upper bound for the range.
    bytes end_key_open = 3;

    // Used when giving an inclusive upper bound for the range.
    bytes end_key_closed = 4;
  }
}

// Specifies a non-contiguous set of rows.
message RowSet {
  // Single rows included in the set.
  repeated bytes row_keys = 1;

  // Contiguous row ranges included in the set.
  repeated RowRange row_ranges = 2;
}

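Editor's note: a hedged sketch of building a RowSet (one explicit key plus a half-open range) with the generated Python types; the module path google.cloud.bigtable_v2 and the resource names are assumptions, not part of this vendored file.

from google.cloud.bigtable_v2 import types

rows = types.RowSet(
    row_keys=[b"user#123"],
    row_ranges=[types.RowRange(start_key_closed=b"user#a", end_key_open=b"user#n")],
)
request = types.ReadRowsRequest(
    table_name="projects/p/instances/i/tables/t", rows=rows)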
// Specifies a contiguous range of columns within a single column family.
// The range spans from <column_family>:<start_qualifier> to
// <column_family>:<end_qualifier>, where both bounds can be either
// inclusive or exclusive.
message ColumnRange {
  // The name of the column family within which this range falls.
  string family_name = 1;

  // The column qualifier at which to start the range (within `column_family`).
  // If neither field is set, interpreted as the empty string, inclusive.
  oneof start_qualifier {
    // Used when giving an inclusive lower bound for the range.
    bytes start_qualifier_closed = 2;

    // Used when giving an exclusive lower bound for the range.
    bytes start_qualifier_open = 3;
  }

  // The column qualifier at which to end the range (within `column_family`).
  // If neither field is set, interpreted as the infinite string, exclusive.
  oneof end_qualifier {
    // Used when giving an inclusive upper bound for the range.
    bytes end_qualifier_closed = 4;

    // Used when giving an exclusive upper bound for the range.
    bytes end_qualifier_open = 5;
  }
}

// Specifies a contiguous range of microsecond timestamps.
message TimestampRange {
  // Inclusive lower bound. If left empty, interpreted as 0.
  int64 start_timestamp_micros = 1;

  // Exclusive upper bound. If left empty, interpreted as infinity.
  int64 end_timestamp_micros = 2;
}

// Specifies a contiguous range of raw byte values.
message ValueRange {
  // The value at which to start the range.
  // If neither field is set, interpreted as the empty string, inclusive.
  oneof start_value {
    // Used when giving an inclusive lower bound for the range.
    bytes start_value_closed = 1;

    // Used when giving an exclusive lower bound for the range.
    bytes start_value_open = 2;
  }

  // The value at which to end the range.
  // If neither field is set, interpreted as the infinite string, exclusive.
  oneof end_value {
    // Used when giving an inclusive upper bound for the range.
    bytes end_value_closed = 3;

    // Used when giving an exclusive upper bound for the range.
    bytes end_value_open = 4;
  }
}

// Takes a row as input and produces an alternate view of the row based on
// specified rules. For example, a RowFilter might trim down a row to include
// just the cells from columns matching a given regular expression, or might
// return all the cells of a row but not their values. More complicated filters
// can be composed out of these components to express requests such as, "within
// every column of a particular family, give just the two most recent cells
// which are older than timestamp X."
//
// There are two broad categories of RowFilters (true filters and transformers),
// as well as two ways to compose simple filters into more complex ones
// (chains and interleaves). They work as follows:
//
// * True filters alter the input row by excluding some of its cells wholesale
// from the output row. An example of a true filter is the `value_regex_filter`,
// which excludes cells whose values don't match the specified pattern. All
// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax)
// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An
// important point to keep in mind is that `RE2(.)` is equivalent by default to
// `RE2([^\n])`, meaning that it does not match newlines. When attempting to
// match an arbitrary byte, you should therefore use the escape sequence `\C`,
// which may need to be further escaped as `\\C` in your client language.
//
// * Transformers alter the input row by changing the values of some of its
// cells in the output, without excluding them completely. Currently, the only
// supported transformer is the `strip_value_transformer`, which replaces every
// cell's value with the empty string.
//
// * Chains and interleaves are described in more detail in the
// RowFilter.Chain and RowFilter.Interleave documentation.
//
// The total serialized size of a RowFilter message must not
// exceed 4096 bytes, and RowFilters may not be nested within each other
// (in Chains or Interleaves) to a depth of more than 20.
message RowFilter {
  // A RowFilter which sends rows through several RowFilters in sequence.
  message Chain {
    // The elements of "filters" are chained together to process the input row:
    // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row
    // The full chain is executed atomically.
    repeated RowFilter filters = 1;
  }

  // A RowFilter which sends each row to each of several component
  // RowFilters and interleaves the results.
  message Interleave {
    // The elements of "filters" all process a copy of the input row, and the
    // results are pooled, sorted, and combined into a single output row.
    // If multiple cells are produced with the same column and timestamp,
    // they will all appear in the output row in an unspecified mutual order.
    // Consider the following example, with three filters:
    //
    //                             input row
    //                                 |
    //       -----------------------------------------------------
    //       |                         |                          |
    //      f(0)                      f(1)                       f(2)
    //       |                         |                          |
    // 1: foo,bar,10,x           foo,bar,10,z              far,bar,7,a
    // 2: foo,blah,11,z          far,blah,5,x              far,blah,5,x
    //       |                         |                          |
    //       -----------------------------------------------------
    //                                 |
    // 1:                       foo,bar,10,z   // could have switched with #2
    // 2:                       foo,bar,10,x   // could have switched with #1
    // 3:                       foo,blah,11,z
    // 4:                       far,bar,7,a
    // 5:                       far,blah,5,x   // identical to #6
    // 6:                       far,blah,5,x   // identical to #5
    //
    // All interleaved filters are executed atomically.
    repeated RowFilter filters = 1;
  }

  // A RowFilter which evaluates one of two possible RowFilters, depending on
  // whether or not a predicate RowFilter outputs any cells from the input row.
  //
  // IMPORTANT NOTE: The predicate filter does not execute atomically with the
  // true and false filters, which may lead to inconsistent or unexpected
  // results. Additionally, Condition filters have poor performance, especially
  // when filters are set for the false condition.
  message Condition {
    // If `predicate_filter` outputs any cells, then `true_filter` will be
    // evaluated on the input row. Otherwise, `false_filter` will be evaluated.
    RowFilter predicate_filter = 1;

    // The filter to apply to the input row if `predicate_filter` returns any
    // results. If not provided, no results will be returned in the true case.
    RowFilter true_filter = 2;

    // The filter to apply to the input row if `predicate_filter` does not
    // return any results. If not provided, no results will be returned in the
    // false case.
    RowFilter false_filter = 3;
  }

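Editor's note: a hedged sketch of the Condition message above using the Python client's helper names (ConditionalRowFilter corresponds to Condition; base_filter to predicate_filter); the concrete filters are placeholders.

from google.cloud.bigtable import row_filters

flt = row_filters.ConditionalRowFilter(
    base_filter=row_filters.ColumnQualifierRegexFilter(b"opted_in"),
    true_filter=row_filters.PassAllFilter(True),
    false_filter=row_filters.StripValueTransformerFilter(True))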
  // Which of the possible RowFilter types to apply. If none are set, this
  // RowFilter returns all cells in the input row.
  oneof filter {
    // Applies several RowFilters to the data in sequence, progressively
    // narrowing the results.
    Chain chain = 1;

    // Applies several RowFilters to the data in parallel and combines the
    // results.
    Interleave interleave = 2;

    // Applies one of two possible RowFilters to the data based on the output of
    // a predicate RowFilter.
    Condition condition = 3;

    // ADVANCED USE ONLY.
    // Hook for introspection into the RowFilter. Outputs all cells directly to
    // the output of the read rather than to any parent filter. Consider the
    // following example:
    //
    //     Chain(
    //       FamilyRegex("A"),
    //       Interleave(
    //         All(),
    //         Chain(Label("foo"), Sink())
    //       ),
    //       QualifierRegex("B")
    //     )
    //
    //                         A,A,1,w
    //                         A,B,2,x
    //                         B,B,4,z
    //                            |
    //                     FamilyRegex("A")
    //                            |
    //                         A,A,1,w
    //                         A,B,2,x
    //                            |
    //              +------------+-------------+
    //              |                          |
    //            All()                    Label(foo)
    //              |                          |
    //           A,A,1,w              A,A,1,w,labels:[foo]
    //           A,B,2,x              A,B,2,x,labels:[foo]
    //              |                          |
    //              |                        Sink() --------------+
    //              |                          |                  |
    //              +------------+      x------+       A,A,1,w,labels:[foo]
    //                           |                     A,B,2,x,labels:[foo]
    //                        A,A,1,w                            |
    //                        A,B,2,x                            |
    //                           |                               |
    //                  QualifierRegex("B")                      |
    //                           |                               |
    //                        A,B,2,x                            |
    //                           |                               |
    //                           +-------------------------------+
    //                           |
    //            A,A,1,w,labels:[foo]
    //            A,B,2,x,labels:[foo]  // could be switched
    //            A,B,2,x               // could be switched
    //
    // Despite being excluded by the qualifier filter, a copy of every cell
    // that reaches the sink is present in the final result.
    //
    // As with an [Interleave][google.bigtable.v2.RowFilter.Interleave],
    // duplicate cells are possible, and appear in an unspecified mutual order.
    // In this case we have a duplicate with column "A:B" and timestamp 2,
    // because one copy passed through the all filter while the other was
    // passed through the label and sink. Note that one copy has label "foo",
    // while the other does not.
    //
    // Cannot be used within the `predicate_filter`, `true_filter`, or
    // `false_filter` of a [Condition][google.bigtable.v2.RowFilter.Condition].
    bool sink = 16;
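Editor's note: the documented Chain/Interleave/Label/Sink example, expressed with the Python client's helper names as a hedged sketch (RowFilterUnion corresponds to Interleave; ApplyLabelFilter and SinkFilter to apply_label_transformer and sink).

flt = row_filters.RowFilterChain(filters=[
    row_filters.FamilyNameRegexFilter("A"),
    row_filters.RowFilterUnion(filters=[
        row_filters.PassAllFilter(True),
        row_filters.RowFilterChain(filters=[
            row_filters.ApplyLabelFilter("foo"),
            row_filters.SinkFilter(True)]),
    ]),
    row_filters.ColumnQualifierRegexFilter(b"B"),
])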
// Matches all cells, regardless of input. Functionally equivalent to
|
||||
// leaving `filter` unset, but included for completeness.
|
||||
bool pass_all_filter = 17;
|
||||
|
||||
// Does not match any cells, regardless of input. Useful for temporarily
|
||||
// disabling just part of a filter.
|
||||
bool block_all_filter = 18;
|
||||
|
||||
// Matches only cells from rows whose keys satisfy the given RE2 regex. In
|
||||
// other words, passes through the entire row when the key matches, and
|
||||
// otherwise produces an empty row.
|
||||
// Note that, since row keys can contain arbitrary bytes, the `\C` escape
|
||||
// sequence must be used if a true wildcard is desired. The `.` character
|
||||
// will not match the new line character `\n`, which may be present in a
|
||||
// binary key.
|
||||
bytes row_key_regex_filter = 4;
|
||||
|
||||
// Matches all cells from a row with probability p, and matches no cells
|
||||
// from the row with probability 1-p.
|
||||
double row_sample_filter = 14;
|
||||
|
||||
// Matches only cells from columns whose families satisfy the given RE2
|
||||
// regex. For technical reasons, the regex must not contain the `:`
|
||||
// character, even if it is not being used as a literal.
|
||||
// Note that, since column families cannot contain the new line character
|
||||
// `\n`, it is sufficient to use `.` as a full wildcard when matching
|
||||
// column family names.
string family_name_regex_filter = 5;

// Matches only cells from columns whose qualifiers satisfy the given RE2
// regex.
// Note that, since column qualifiers can contain arbitrary bytes, the `\C`
// escape sequence must be used if a true wildcard is desired. The `.`
// character will not match the new line character `\n`, which may be
// present in a binary qualifier.
bytes column_qualifier_regex_filter = 6;

// Matches only cells from columns within the given range.
ColumnRange column_range_filter = 7;

// Matches only cells with timestamps within the given range.
TimestampRange timestamp_range_filter = 8;

// Matches only cells with values that satisfy the given regular expression.
// Note that, since cell values can contain arbitrary bytes, the `\C` escape
// sequence must be used if a true wildcard is desired. The `.` character
// will not match the new line character `\n`, which may be present in a
// binary value.
bytes value_regex_filter = 9;

// Matches only cells with values that fall within the given range.
ValueRange value_range_filter = 15;

// Skips the first N cells of each row, matching all subsequent cells.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_offset_filter = 10;

// Matches only the first N cells of each row.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_row_limit_filter = 11;

// Matches only the most recent N cells within each column. For example,
// if N=2, this filter would match column `foo:bar` at timestamps 10 and 9,
// skip all earlier cells in `foo:bar`, and then begin matching again in
// column `foo:bar2`.
// If duplicate cells are present, as is possible when using an Interleave,
// each copy of the cell is counted separately.
int32 cells_per_column_limit_filter = 12;

// Replaces each cell's value with the empty string.
bool strip_value_transformer = 13;

// Applies the given label to all cells in the output row. This allows
// the client to determine which results were produced from which part of
// the filter.
//
// Values must be at most 15 characters in length, and match the RE2
// pattern `[a-z0-9\\-]+`
//
// Due to a technical limitation, it is not currently possible to apply
// multiple labels to a cell. As a result, a Chain may have no more than
// one sub-filter which contains an `apply_label_transformer`. It is okay for
// an Interleave to contain multiple `apply_label_transformers`, as they
// will be applied to separate copies of the input. This may be relaxed in
// the future.
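// Illustrative note added for clarity (not in the upstream comment): a
// label value such as "prefix-match" satisfies the pattern above, whereas
// "Prefix_1" would be rejected because uppercase letters and underscores
// are not allowed.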
string apply_label_transformer = 19;
}
}

// Specifies a particular change to be made to the contents of a row.
message Mutation {
// A Mutation which sets the value of the specified cell.
message SetCell {
// The name of the family into which new data should be written.
// Must match `[-_.a-zA-Z0-9]+`
string family_name = 1;

// The qualifier of the column into which new data should be written.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;

// The timestamp of the cell into which new data should be written.
// Use -1 for current Bigtable server time.
// Otherwise, the client should set this value itself, noting that the
// default value is a timestamp of zero if the field is left unspecified.
// Values must match the granularity of the table (e.g. micros, millis).
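// Illustrative note added for clarity (not in the upstream comment): for a
// table with millisecond granularity, a value such as 1500000000000000 (a
// whole number of milliseconds expressed in microseconds) matches the
// granularity, whereas 1500000000000001 does not.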
int64 timestamp_micros = 3;

// The value to be written into the specified cell.
bytes value = 4;
}

// A Mutation which deletes cells from the specified column, optionally
// restricting the deletions to a given timestamp range.
message DeleteFromColumn {
// The name of the family from which cells should be deleted.
// Must match `[-_.a-zA-Z0-9]+`
string family_name = 1;

// The qualifier of the column from which cells should be deleted.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;

// The range of timestamps within which cells should be deleted.
TimestampRange time_range = 3;
}

// A Mutation which deletes all cells from the specified column family.
message DeleteFromFamily {
// The name of the family from which cells should be deleted.
// Must match `[-_.a-zA-Z0-9]+`
string family_name = 1;
}

// A Mutation which deletes all cells from the containing row.
message DeleteFromRow {

}

// Which of the possible Mutation types to apply.
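// Illustrative sketch added for clarity (not part of the upstream file):
// in protobuf text format, a Mutation that writes a single cell might look
// like
//   set_cell {
//     family_name: "cf1"
//     column_qualifier: "greeting"
//     timestamp_micros: -1
//     value: "hello"
//   }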
oneof mutation {
// Set a cell's value.
SetCell set_cell = 1;

// Deletes cells from a column.
DeleteFromColumn delete_from_column = 2;

// Deletes cells from a column family.
DeleteFromFamily delete_from_family = 3;

// Deletes cells from the entire row.
DeleteFromRow delete_from_row = 4;
}
}

// Specifies an atomic read/modify/write operation on the latest value of the
// specified column.
message ReadModifyWriteRule {
// The name of the family to which the read/modify/write should be applied.
// Must match `[-_.a-zA-Z0-9]+`
string family_name = 1;

// The qualifier of the column to which the read/modify/write should be
// applied.
// Can be any byte string, including the empty string.
bytes column_qualifier = 2;

// The rule used to determine the column's new latest value from its current
// latest value.
oneof rule {
// Rule specifying that `append_value` be appended to the existing value.
// If the targeted cell is unset, it will be treated as containing the
// empty string.
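// Illustrative note added for clarity (not in the upstream comment):
// appending the bytes "world" to a cell currently holding "hello" yields a
// new latest cell containing "helloworld".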
bytes append_value = 3;

// Rule specifying that `increment_amount` be added to the existing value.
// If the targeted cell is unset, it will be treated as containing a zero.
// Otherwise, the targeted cell must contain an 8-byte value (interpreted
// as a 64-bit big-endian signed integer), or the entire request will fail.
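// Illustrative note added for clarity (not in the upstream comment): a cell
// holding the counter value 5 is stored as the 8 bytes
// 00 00 00 00 00 00 00 05; incrementing it by 1 rewrites the cell as
// 00 00 00 00 00 00 00 06.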
int64 increment_amount = 4;
}
}