syntax = "proto3";

package volume_server_pb;

option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb";

import "remote.proto";

//////////////////////////////////////////////////
|
// VolumeServer is the gRPC API exposed by each volume server: batch needle
// deletion, vacuum, volume lifecycle (allocate/mount/unmount/delete),
// replication copies, erasure coding shard management, tiered storage
// moves, and status/health checks.
service VolumeServer {

  //Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
  rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) {
  }

  // vacuum: reclaim disk space held by deleted needles
  rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) {
  }
  rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (stream VacuumVolumeCompactResponse) {
  }
  rpc VacuumVolumeCommit (VacuumVolumeCommitRequest) returns (VacuumVolumeCommitResponse) {
  }
  rpc VacuumVolumeCleanup (VacuumVolumeCleanupRequest) returns (VacuumVolumeCleanupResponse) {
  }

  rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
  }
  rpc AllocateVolume (AllocateVolumeRequest) returns (AllocateVolumeResponse) {
  }

  rpc VolumeSyncStatus (VolumeSyncStatusRequest) returns (VolumeSyncStatusResponse) {
  }
  rpc VolumeIncrementalCopy (VolumeIncrementalCopyRequest) returns (stream VolumeIncrementalCopyResponse) {
  }

  rpc VolumeMount (VolumeMountRequest) returns (VolumeMountResponse) {
  }
  rpc VolumeUnmount (VolumeUnmountRequest) returns (VolumeUnmountResponse) {
  }
  rpc VolumeDelete (VolumeDeleteRequest) returns (VolumeDeleteResponse) {
  }
  rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) {
  }
  rpc VolumeMarkWritable (VolumeMarkWritableRequest) returns (VolumeMarkWritableResponse) {
  }
  rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) {
  }
  rpc VolumeStatus (VolumeStatusRequest) returns (VolumeStatusResponse) {
  }

  // copy the .idx .dat files, and mount this volume
  rpc VolumeCopy (VolumeCopyRequest) returns (stream VolumeCopyResponse) {
  }
  rpc ReadVolumeFileStatus (ReadVolumeFileStatusRequest) returns (ReadVolumeFileStatusResponse) {
  }
  rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) {
  }
  rpc ReceiveFile (stream ReceiveFileRequest) returns (ReceiveFileResponse) {
  }
  rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) {
  }
  rpc ReadNeedleMeta (ReadNeedleMetaRequest) returns (ReadNeedleMetaResponse) {
  }
  rpc WriteNeedleBlob (WriteNeedleBlobRequest) returns (WriteNeedleBlobResponse) {
  }
  rpc ReadAllNeedles (ReadAllNeedlesRequest) returns (stream ReadAllNeedlesResponse) {
  }

  rpc VolumeTailSender (VolumeTailSenderRequest) returns (stream VolumeTailSenderResponse) {
  }
  rpc VolumeTailReceiver (VolumeTailReceiverRequest) returns (VolumeTailReceiverResponse) {
  }

  // erasure coding
  rpc VolumeEcShardsGenerate (VolumeEcShardsGenerateRequest) returns (VolumeEcShardsGenerateResponse) {
  }
  rpc VolumeEcShardsRebuild (VolumeEcShardsRebuildRequest) returns (VolumeEcShardsRebuildResponse) {
  }
  rpc VolumeEcShardsCopy (VolumeEcShardsCopyRequest) returns (VolumeEcShardsCopyResponse) {
  }
  rpc VolumeEcShardsDelete (VolumeEcShardsDeleteRequest) returns (VolumeEcShardsDeleteResponse) {
  }
  rpc VolumeEcShardsMount (VolumeEcShardsMountRequest) returns (VolumeEcShardsMountResponse) {
  }
  rpc VolumeEcShardsUnmount (VolumeEcShardsUnmountRequest) returns (VolumeEcShardsUnmountResponse) {
  }
  rpc VolumeEcShardRead (VolumeEcShardReadRequest) returns (stream VolumeEcShardReadResponse) {
  }
  rpc VolumeEcBlobDelete (VolumeEcBlobDeleteRequest) returns (VolumeEcBlobDeleteResponse) {
  }
  rpc VolumeEcShardsToVolume (VolumeEcShardsToVolumeRequest) returns (VolumeEcShardsToVolumeResponse) {
  }
  rpc VolumeEcShardsInfo (VolumeEcShardsInfoRequest) returns (VolumeEcShardsInfoResponse) {
  }

  // tiered storage
  rpc VolumeTierMoveDatToRemote (VolumeTierMoveDatToRemoteRequest) returns (stream VolumeTierMoveDatToRemoteResponse) {
  }
  rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) {
  }

  rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) {
  }
  rpc VolumeServerLeave (VolumeServerLeaveRequest) returns (VolumeServerLeaveResponse) {
  }

  // remote storage
  rpc FetchAndWriteNeedle (FetchAndWriteNeedleRequest) returns (FetchAndWriteNeedleResponse) {
  }

  // <experimental> query
  rpc Query (QueryRequest) returns (stream QueriedStripe) {
  }

  rpc VolumeNeedleStatus (VolumeNeedleStatusRequest) returns (VolumeNeedleStatusResponse) {
  }

  rpc Ping (PingRequest) returns (PingResponse) {
  }

}
//////////////////////////////////////////////////

message BatchDeleteRequest {
  repeated string file_ids = 1;
  // Presumably skips validating the cookie embedded in each file id — TODO confirm against server handler.
  bool skip_cookie_check = 2;
}

message BatchDeleteResponse {
  repeated DeleteResult results = 1;
}

// Per-file outcome of a BatchDelete call.
message DeleteResult {
  string file_id = 1;
  // Status code for this deletion (HTTP-style by convention — verify against handler).
  int32 status = 2;
  string error = 3;
  uint32 size = 4;
  uint32 version = 5;
}

message Empty {
}
message VacuumVolumeCheckRequest {
  uint32 volume_id = 1;
}
message VacuumVolumeCheckResponse {
  // Fraction of the volume occupied by deleted data (presumably 0.0–1.0).
  double garbage_ratio = 1;
}

message VacuumVolumeCompactRequest {
  uint32 volume_id = 1;
  int64 preallocate = 2;
}
message VacuumVolumeCompactResponse {
  int64 processed_bytes = 1;
  // 1-minute load average reported alongside compaction progress.
  float load_avg_1m = 2;
}

message VacuumVolumeCommitRequest {
  uint32 volume_id = 1;
}
message VacuumVolumeCommitResponse {
  bool is_read_only = 1;
  uint64 volume_size = 2;
}

message VacuumVolumeCleanupRequest {
  uint32 volume_id = 1;
}
message VacuumVolumeCleanupResponse {
}
|
2018-10-15 00:03:55 -07:00
|
|
|
|
|
|
|
|
message DeleteCollectionRequest {
|
|
|
|
|
string collection = 1;
|
|
|
|
|
}
|
|
|
|
|
message DeleteCollectionResponse {
|
|
|
|
|
}
|
2018-10-15 00:40:46 -07:00
|
|
|
|
2019-04-10 21:41:17 -07:00
|
|
|
message AllocateVolumeRequest {
|
2019-03-23 11:33:34 -07:00
|
|
|
uint32 volume_id = 1;
|
2018-10-15 00:40:46 -07:00
|
|
|
string collection = 2;
|
|
|
|
|
int64 preallocate = 3;
|
|
|
|
|
string replication = 4;
|
|
|
|
|
string ttl = 5;
|
2019-10-21 22:57:01 -07:00
|
|
|
uint32 memory_map_max_size_mb = 6;
|
2020-12-13 23:08:21 -08:00
|
|
|
string disk_type = 7;
|
2025-06-16 22:05:06 -07:00
|
|
|
uint32 version = 8;
|
2018-10-15 00:40:46 -07:00
|
|
|
}
|
2019-04-10 21:41:17 -07:00
|
|
|
message AllocateVolumeResponse {
|
2018-10-15 00:40:46 -07:00
|
|
|
}
message VolumeSyncStatusRequest {
  uint32 volume_id = 1;
}
message VolumeSyncStatusResponse {
  uint32 volume_id = 1;
  string collection = 2;
  // NOTE: field number 3 is unused; do not reuse it for a new field.
  string replication = 4;
  string ttl = 5;
  uint64 tail_offset = 6;
  uint32 compact_revision = 7;
  uint64 idx_file_size = 8;
  uint32 version = 9;
}

message VolumeIncrementalCopyRequest {
  uint32 volume_id = 1;
  uint64 since_ns = 2;
}
message VolumeIncrementalCopyResponse {
  bytes file_content = 1;
}
message VolumeMountRequest {
  uint32 volume_id = 1;
}
message VolumeMountResponse {
}

message VolumeUnmountRequest {
  uint32 volume_id = 1;
}
message VolumeUnmountResponse {
}

message VolumeDeleteRequest {
  uint32 volume_id = 1;
  // Presumably restricts deletion to volumes containing no files — TODO confirm.
  bool only_empty = 2;
}
message VolumeDeleteResponse {
}

message VolumeMarkReadonlyRequest {
  uint32 volume_id = 1;
  bool persist = 2;
}
message VolumeMarkReadonlyResponse {
}

message VolumeMarkWritableRequest {
  uint32 volume_id = 1;
}
message VolumeMarkWritableResponse {
}
message VolumeConfigureRequest {
  uint32 volume_id = 1;
  string replication = 2;
}
message VolumeConfigureResponse {
  string error = 1;
}

message VolumeStatusRequest {
  uint32 volume_id = 1;
}
message VolumeStatusResponse {
  bool is_read_only = 1;
  uint64 volume_size = 2;
  uint64 file_count = 3;
  uint64 file_deleted_count = 4;
}
message VolumeCopyRequest {
  uint32 volume_id = 1;
  string collection = 2;
  string replication = 3;
  string ttl = 4;
  string source_data_node = 5;
  string disk_type = 6;
  int64 io_byte_per_second = 7;
}
message VolumeCopyResponse {
  uint64 last_append_at_ns = 1;
  int64 processed_bytes = 2;
}

message CopyFileRequest {
  uint32 volume_id = 1;
  string ext = 2;
  uint32 compaction_revision = 3;
  uint64 stop_offset = 4;
  string collection = 5;
  bool is_ec_volume = 6;
  bool ignore_source_file_not_found = 7;
}
message CopyFileResponse {
  bytes file_content = 1;
  int64 modified_ts_ns = 2;
}
message ReceiveFileRequest {
  oneof data {
    // File metadata (typically the first message of the client stream).
    ReceiveFileInfo info = 1;
    // Raw file content chunk.
    bytes file_content = 2;
  }
}

message ReceiveFileInfo {
  uint32 volume_id = 1;
  string ext = 2;
  string collection = 3;
  bool is_ec_volume = 4;
  uint32 shard_id = 5;
  uint64 file_size = 6;
}

message ReceiveFileResponse {
  uint64 bytes_written = 1;
  string error = 2;
}
message ReadNeedleBlobRequest {
  uint32 volume_id = 1;
  // NOTE: field number 2 is unused; do not reuse it for a new field.
  int64 offset = 3; // actual offset
  int32 size = 4;
}
message ReadNeedleBlobResponse {
  bytes needle_blob = 1;
}

message ReadNeedleMetaRequest {
  uint32 volume_id = 1;
  uint64 needle_id = 2;
  int64 offset = 3; // actual offset
  int32 size = 4;
}
message ReadNeedleMetaResponse {
  uint32 cookie = 1;
  uint64 last_modified = 2;
  uint32 crc = 3;
  string ttl = 4;
  uint64 append_at_ns = 5;
}

message WriteNeedleBlobRequest {
  uint32 volume_id = 1;
  uint64 needle_id = 2;
  int32 size = 3;
  bytes needle_blob = 4;
}
message WriteNeedleBlobResponse {
}
message ReadAllNeedlesRequest {
  repeated uint32 volume_ids = 1;
}
message ReadAllNeedlesResponse {
  uint32 volume_id = 1;
  uint64 needle_id = 2;
  uint32 cookie = 3;
  // NOTE: field number 4 is unused; do not reuse it for a new field.
  bytes needle_blob = 5;
  bool needle_blob_compressed = 6;
  uint64 last_modified = 7;
  uint32 crc = 8;
  bytes name = 9;
  bytes mime = 10;
}
message VolumeTailSenderRequest {
  uint32 volume_id = 1;
  uint64 since_ns = 2;
  uint32 idle_timeout_seconds = 3;
}
message VolumeTailSenderResponse {
  bytes needle_header = 1;
  bytes needle_body = 2;
  bool is_last_chunk = 3;
  uint32 version = 4;
}

message VolumeTailReceiverRequest {
  uint32 volume_id = 1;
  uint64 since_ns = 2;
  uint32 idle_timeout_seconds = 3;
  string source_volume_server = 4;
}
message VolumeTailReceiverResponse {
}
message VolumeEcShardsGenerateRequest {
  uint32 volume_id = 1;
  string collection = 2;
}
message VolumeEcShardsGenerateResponse {
}

message VolumeEcShardsRebuildRequest {
  uint32 volume_id = 1;
  string collection = 2;
}
message VolumeEcShardsRebuildResponse {
  repeated uint32 rebuilt_shard_ids = 1;
}

message VolumeEcShardsCopyRequest {
  uint32 volume_id = 1;
  string collection = 2;
  repeated uint32 shard_ids = 3;
  bool copy_ecx_file = 4;
  string source_data_node = 5;
  bool copy_ecj_file = 6;
  bool copy_vif_file = 7;
  uint32 disk_id = 8; // Target disk ID for storing EC shards
}
message VolumeEcShardsCopyResponse {
}

message VolumeEcShardsDeleteRequest {
  uint32 volume_id = 1;
  string collection = 2;
  repeated uint32 shard_ids = 3;
}
message VolumeEcShardsDeleteResponse {
}

message VolumeEcShardsMountRequest {
  uint32 volume_id = 1;
  string collection = 2;
  repeated uint32 shard_ids = 3;
}
message VolumeEcShardsMountResponse {
}

message VolumeEcShardsUnmountRequest {
  uint32 volume_id = 1;
  // NOTE: field number 2 is unused; do not reuse it for a new field.
  repeated uint32 shard_ids = 3;
}
message VolumeEcShardsUnmountResponse {
}
message VolumeEcShardReadRequest {
  uint32 volume_id = 1;
  uint32 shard_id = 2;
  int64 offset = 3;
  int64 size = 4;
  uint64 file_key = 5;
}
message VolumeEcShardReadResponse {
  bytes data = 1;
  bool is_deleted = 2;
}

message VolumeEcBlobDeleteRequest {
  uint32 volume_id = 1;
  string collection = 2;
  uint64 file_key = 3;
  uint32 version = 4;
}
message VolumeEcBlobDeleteResponse {
}

message VolumeEcShardsToVolumeRequest {
  uint32 volume_id = 1;
  string collection = 2;
}
message VolumeEcShardsToVolumeResponse {
}
message VolumeEcShardsInfoRequest {
  uint32 volume_id = 1;
}
message VolumeEcShardsInfoResponse {
  repeated EcShardInfo ec_shard_infos = 1;
}

message EcShardInfo {
  uint32 shard_id = 1;
  // Shard size in bytes.
  int64 size = 2;
  string collection = 3;
}
message ReadVolumeFileStatusRequest {
  uint32 volume_id = 1;
}
message ReadVolumeFileStatusResponse {
  uint32 volume_id = 1;
  uint64 idx_file_timestamp_seconds = 2;
  uint64 idx_file_size = 3;
  uint64 dat_file_timestamp_seconds = 4;
  uint64 dat_file_size = 5;
  uint64 file_count = 6;
  uint32 compaction_revision = 7;
  string collection = 8;
  string disk_type = 9;
  VolumeInfo volume_info = 10;
  uint32 version = 11;
}
message DiskStatus {
  string dir = 1;
  // all/used/free are byte counts — presumably of the filesystem holding `dir`; confirm against collector.
  uint64 all = 2;
  uint64 used = 3;
  uint64 free = 4;
  float percent_free = 5;
  float percent_used = 6;
  string disk_type = 7;
}

message MemStatus {
  int32 goroutines = 1;
  uint64 all = 2;
  uint64 used = 3;
  uint64 free = 4;
  uint64 self = 5;
  uint64 heap = 6;
  uint64 stack = 7;
}
2019-10-02 12:06:03 -07:00
|
|
|
|
2019-11-27 03:09:42 -08:00
|
|
|
// tired storage on volume servers
|
2019-12-02 15:08:28 -08:00
|
|
|
message RemoteFile {
|
2019-11-27 03:09:42 -08:00
|
|
|
string backend_type = 1;
|
2019-12-02 15:08:28 -08:00
|
|
|
string backend_id = 2;
|
|
|
|
|
string key = 3;
|
|
|
|
|
uint64 offset = 4;
|
|
|
|
|
uint64 file_size = 5;
|
|
|
|
|
uint64 modified_time = 6;
|
2019-12-28 11:16:10 -08:00
|
|
|
string extension = 7;
|
2019-12-02 15:08:28 -08:00
|
|
|
}
|
2019-12-28 11:21:49 -08:00
|
|
|
message VolumeInfo {
|
2019-12-02 15:08:28 -08:00
|
|
|
repeated RemoteFile files = 1;
|
2019-12-28 12:28:58 -08:00
|
|
|
uint32 version = 2;
|
2020-02-02 15:37:23 -08:00
|
|
|
string replication = 3;
|
2024-10-24 21:36:56 -07:00
|
|
|
uint32 bytes_offset = 4;
|
2024-10-24 21:35:11 -07:00
|
|
|
int64 dat_file_size = 5; // store the original dat file size
|
|
|
|
|
uint64 expire_at_sec = 6; // expiration time of ec volume
|
2024-09-05 22:58:24 +08:00
|
|
|
bool read_only = 7;
|
Erasure Coding: Ec refactoring (#7396)
* refactor: add ECContext structure to encapsulate EC parameters
- Create ec_context.go with ECContext struct
- NewDefaultECContext() creates context with default 10+4 configuration
- Helper methods: CreateEncoder(), ToExt(), String()
- Foundation for cleaner function signatures
- No behavior change, still uses hardcoded 10+4
* refactor: update ec_encoder.go to use ECContext
- Add WriteEcFilesWithContext() and RebuildEcFilesWithContext() functions
- Keep old functions for backward compatibility (call new versions)
- Update all internal functions to accept ECContext parameter
- Use ctx.DataShards, ctx.ParityShards, ctx.TotalShards consistently
- Use ctx.CreateEncoder() instead of hardcoded reedsolomon.New()
- Use ctx.ToExt() for shard file extensions
- No behavior change, still uses default 10+4 configuration
* refactor: update ec_volume.go to use ECContext
- Add ECContext field to EcVolume struct
- Initialize ECContext with default configuration in NewEcVolume()
- Update LocateEcShardNeedleInterval() to use ECContext.DataShards
- Phase 1: Always uses default 10+4 configuration
- No behavior change
* refactor: add EC shard count fields to VolumeInfo protobuf
- Add data_shards_count field (field 8) to VolumeInfo message
- Add parity_shards_count field (field 9) to VolumeInfo message
- Fields are optional, 0 means use default (10+4)
- Backward compatible: fields added at end
- Phase 1: Foundation for future customization
* refactor: regenerate protobuf Go files with EC shard count fields
- Regenerated volume_server_pb/*.go with new EC fields
- DataShardsCount and ParityShardsCount accessors added to VolumeInfo
- No behavior change, fields not yet used
* refactor: update VolumeEcShardsGenerate to use ECContext
- Create ECContext with default configuration in VolumeEcShardsGenerate
- Use ecCtx.TotalShards and ecCtx.ToExt() in cleanup
- Call WriteEcFilesWithContext() instead of WriteEcFiles()
- Save EC configuration (DataShardsCount, ParityShardsCount) to VolumeInfo
- Log EC context being used
- Phase 1: Always uses default 10+4 configuration
- No behavior change
* fmt
* refactor: update ec_test.go to use ECContext
- Update TestEncodingDecoding to create and use ECContext
- Update validateFiles() to accept ECContext parameter
- Update removeGeneratedFiles() to use ctx.TotalShards and ctx.ToExt()
- Test passes with default 10+4 configuration
* refactor: use EcShardConfig message instead of separate fields
* optimize: pre-calculate row sizes in EC encoding loop
* refactor: replace TotalShards field with Total() method
- Remove TotalShards field from ECContext to avoid field drift
- Add Total() method that computes DataShards + ParityShards
- Update all references to use ctx.Total() instead of ctx.TotalShards
- Read EC config from VolumeInfo when loading EC volumes
- Read data shard count from .vif in VolumeEcShardsToVolume
- Use >= instead of > for exact boundary handling in encoding loops
* optimize: simplify VolumeEcShardsToVolume to use existing EC context
- Remove redundant CollectEcShards call
- Remove redundant .vif file loading
- Use v.ECContext.DataShards directly (already loaded by NewEcVolume)
- Slice tempShards instead of collecting again
* refactor: rename MaxShardId to MaxShardCount for clarity
- Change from MaxShardId=31 to MaxShardCount=32
- Eliminates confusing +1 arithmetic (MaxShardId+1)
- More intuitive: MaxShardCount directly represents the limit
fix: support custom EC ratios beyond 14 shards in VolumeEcShardsToVolume
- Add MaxShardId constant (31, since ShardBits is uint32)
- Use MaxShardId+1 (32) instead of TotalShardsCount (14) for tempShards buffer
- Prevents panic when slicing for volumes with >14 total shards
- Critical fix for custom EC configurations like 20+10
* fix: add validation for EC shard counts from VolumeInfo
- Validate DataShards/ParityShards are positive and within MaxShardCount
- Prevent zero or invalid values that could cause divide-by-zero
- Fallback to defaults if validation fails, with warning log
- VolumeEcShardsGenerate now preserves existing EC config when regenerating
- Critical safety fix for corrupted or legacy .vif files
* fix: RebuildEcFiles now loads EC config from .vif file
- Critical: RebuildEcFiles was always using default 10+4 config
- Now loads actual EC config from .vif file when rebuilding shards
- Validates config before use (positive shards, within MaxShardCount)
- Falls back to default if .vif missing or invalid
- Prevents data corruption when rebuilding custom EC volumes
* add: defensive validation for dataShards in VolumeEcShardsToVolume
- Validate dataShards > 0 and <= MaxShardCount before use
- Prevents panic from corrupted or uninitialized ECContext
- Returns clear error message instead of panic
- Defense-in-depth: validates even though upstream should catch issues
* fix: replace TotalShardsCount with MaxShardCount for custom EC ratio support
Critical fixes to support custom EC ratios > 14 shards:
disk_location_ec.go:
- validateEcVolume: Check shards 0-31 instead of 0-13 during validation
- removeEcVolumeFiles: Remove shards 0-31 instead of 0-13 during cleanup
ec_volume_info.go ShardBits methods:
- ShardIds(): Iterate up to MaxShardCount (32) instead of TotalShardsCount (14)
- ToUint32Slice(): Iterate up to MaxShardCount (32)
- IndexToShardId(): Iterate up to MaxShardCount (32)
- MinusParityShards(): Remove shards 10-31 instead of 10-13 (added note about Phase 2)
- Minus() shard size copy: Iterate up to MaxShardCount (32)
- resizeShardSizes(): Iterate up to MaxShardCount (32)
Without these changes:
- Custom EC ratios > 14 total shards would fail validation on startup
- Shards 14-31 would never be discovered or cleaned up
- ShardBits operations would miss shards >= 14
These changes are backward compatible - MaxShardCount (32) includes
the default TotalShardsCount (14), so existing 10+4 volumes work as before.
* fix: replace TotalShardsCount with MaxShardCount in critical data structures
Critical fixes for buffer allocations and loops that must support
custom EC ratios up to 32 shards:
Data Structures:
- store_ec.go:354: Buffer allocation for shard recovery (bufs array)
- topology_ec.go:14: EcShardLocations.Locations fixed array size
- command_ec_rebuild.go:268: EC shard map allocation
- command_ec_common.go:626: Shard-to-locations map allocation
Shard Discovery Loops:
- ec_task.go:378: Loop to find generated shard files
- ec_shard_management.go: All 8 loops that check/count EC shards
These changes are critical because:
1. Buffer allocations sized to 14 would cause index-out-of-bounds panics
when accessing shards 14-31
2. Fixed arrays sized to 14 would truncate shard location data
3. Loops limited to 0-13 would never discover/manage shards 14-31
Note: command_ec_encode.go:208 intentionally NOT changed - it creates
shard IDs to mount after encoding. In Phase 1 we always generate 14
shards, so this remains TotalShardsCount and will be made dynamic in
Phase 2 based on actual EC context.
Without these fixes, custom EC ratios > 14 total shards would cause:
- Runtime panics (array index out of bounds)
- Data loss (shards 14-31 never discovered/tracked)
- Incomplete shard management (missing shards not detected)
* refactor: move MaxShardCount constant to ec_encoder.go
Moved MaxShardCount from ec_volume_info.go to ec_encoder.go to group it
with other shard count constants (DataShardsCount, ParityShardsCount,
TotalShardsCount). This improves code organization and makes it easier
to understand the relationship between these constants.
Location: ec_encoder.go line 22, between TotalShardsCount and MinTotalDisks
* improve: add defensive programming and better error messages for EC
Code review improvements from CodeRabbit:
1. ShardBits Guardrails (ec_volume_info.go):
- AddShardId, RemoveShardId: Reject shard IDs >= MaxShardCount
- HasShardId: Return false for out-of-range shard IDs
- Prevents silent no-ops from bit shifts with invalid IDs
2. Future-Proof Regex (disk_location_ec.go):
- Updated regex from \.ec[0-9][0-9] to \.ec\d{2,3}
- Now matches .ec00 through .ec999 (currently .ec00-.ec31 used)
- Supports future increases to MaxShardCount beyond 99
3. Better Error Messages (volume_grpc_erasure_coding.go):
- Include valid range (1..32) in dataShards validation error
- Helps operators quickly identify the problem
4. Validation Before Save (volume_grpc_erasure_coding.go):
- Validate ECContext (DataShards > 0, ParityShards > 0, Total <= MaxShardCount)
- Log EC config being saved to .vif for debugging
- Prevents writing invalid configs to disk
These changes improve robustness and debuggability without changing
core functionality.
* fmt
* fix: critical bugs from code review + clean up comments
Critical bug fixes:
1. command_ec_rebuild.go: Fixed indentation causing compilation error
- Properly nested if/for blocks in registerEcNode
2. ec_shard_management.go: Fixed isComplete logic incorrectly using MaxShardCount
- Changed from MaxShardCount (32) back to TotalShardsCount (14)
- Default 10+4 volumes were being incorrectly reported as incomplete
- Missing shards 14-31 were being incorrectly reported as missing
- Fixed in 4 locations: volume completeness checks and getMissingShards
3. ec_volume_info.go: Fixed MinusParityShards removing too many shards
- Changed from MaxShardCount (32) back to TotalShardsCount (14)
- Was incorrectly removing shard IDs 10-31 instead of just 10-13
Comment cleanup:
- Removed Phase 1/Phase 2 references (development plan context)
- Replaced with clear statements about default 10+4 configuration
- SeaweedFS repo uses fixed 10+4 EC ratio, no phases needed
Root cause: Over-aggressive replacement of TotalShardsCount with MaxShardCount.
MaxShardCount (32) is the limit for buffer allocations and shard ID loops,
but TotalShardsCount (14) must be used for default EC configuration logic.
* fix: add defensive bounds checks and compute actual shard counts
Critical fixes from code review:
1. topology_ec.go: Add defensive bounds checks to AddShard/DeleteShard
- Prevent panic when shardId >= MaxShardCount (32)
- Return false instead of crashing on out-of-range shard IDs
2. command_ec_common.go: Fix doBalanceEcShardsAcrossRacks
- Was using hardcoded TotalShardsCount (14) for all volumes
- Now computes actual totalShardsForVolume from rackToShardCount
- Fixes incorrect rebalancing for volumes with custom EC ratios
- Example: 5+2=7 shards would incorrectly use 14 as average
These fixes improve robustness and prepare for future custom EC ratios
without changing current behavior for default 10+4 volumes.
Note: MinusParityShards and ec_task.go intentionally NOT changed for
seaweedfs repo - these will be enhanced in seaweed-enterprise repo
where custom EC ratio configuration is added.
* fmt
* style: make MaxShardCount type casting explicit in loops
Improved code clarity by explicitly casting MaxShardCount to the
appropriate type when used in loop comparisons:
- ShardId comparisons: Cast to ShardId(MaxShardCount)
- uint32 comparisons: Cast to uint32(MaxShardCount)
Changed in 5 locations:
- Minus() loop (line 90)
- ShardIds() loop (line 143)
- ToUint32Slice() loop (line 152)
- IndexToShardId() loop (line 219)
- resizeShardSizes() loop (line 248)
This makes the intent explicit and improves type safety readability.
No functional changes - purely a style improvement.
2025-10-27 22:13:31 -07:00
|
|
|
EcShardConfig ec_shard_config = 8; // EC shard configuration (optional, null = use default 10+4)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// EcShardConfig describes an erasure-coding shard layout. When left unset on
// the enclosing message, the default 10+4 (data+parity) configuration applies.
message EcShardConfig {
    uint32 data_shards = 1;   // number of data shards (e.g., 10)
    uint32 parity_shards = 2; // number of parity shards (e.g., 4)
}
|
2024-10-28 19:44:30 -07:00
|
|
|
// Legacy volume metadata layout — the name suggests it is retained so newer
// servers can still read metadata written by older releases (TODO confirm
// against the readers). Field names and numbers are frozen for compatibility.
message OldVersionVolumeInfo {
    repeated RemoteFile files = 1; // remote (tiered) file entries backing this volume
    uint32 version = 2;            // volume format version
    string replication = 3;        // replication placement setting
    uint32 BytesOffset = 4;        // legacy non-snake_case name kept for wire/codegen compatibility
    int64 dat_file_size = 5;       // store the original dat file size
    uint64 DestroyTime = 6;        // expiration time of ec volume; legacy name kept for compatibility
    bool read_only = 7;            // whether the volume is marked read-only
}
|
2019-11-27 03:09:42 -08:00
|
|
|
|
2021-08-07 14:18:53 -07:00
|
|
|
// tiered storage
|
2019-12-25 09:53:13 -08:00
|
|
|
// Request to move a volume's .dat file to a configured remote (tiered)
// storage backend.
message VolumeTierMoveDatToRemoteRequest {
    uint32 volume_id = 1;                // volume whose data file is moved
    string collection = 2;               // collection the volume belongs to
    string destination_backend_name = 3; // name of the configured remote backend
    bool keep_local_dat_file = 4;        // if true, retain the local .dat file after upload
}
|
2019-12-25 09:53:13 -08:00
|
|
|
// Progress update streamed back while moving a .dat file to remote storage.
message VolumeTierMoveDatToRemoteResponse {
    int64 processed = 1;           // amount processed so far (presumably bytes — confirm against server)
    float processedPercentage = 2; // completion percentage; legacy lowerCamelCase name kept for compatibility
}
|
|
|
|
|
|
|
|
|
|
// Request to bring a volume's .dat file back from remote (tiered) storage.
message VolumeTierMoveDatFromRemoteRequest {
    uint32 volume_id = 1;          // volume whose data file is fetched back
    string collection = 2;         // collection the volume belongs to
    bool keep_remote_dat_file = 3; // if true, leave the remote copy in place after download
}
|
|
|
|
|
// Progress update streamed back while restoring a .dat file from remote storage.
message VolumeTierMoveDatFromRemoteResponse {
    int64 processed = 1;           // amount processed so far (presumably bytes — confirm against server)
    float processedPercentage = 2; // completion percentage; legacy lowerCamelCase name kept for compatibility
}
|
|
|
|
|
|
2020-02-21 21:45:03 -08:00
|
|
|
// Request for a volume server's status. Intentionally empty so fields can be
// added later without breaking the RPC signature.
message VolumeServerStatusRequest {
}
|
|
|
|
|
// Snapshot of a volume server's runtime status.
message VolumeServerStatusResponse {
    repeated DiskStatus disk_statuses = 1; // one entry per attached disk/location
    MemStatus memory_status = 2;           // process memory statistics
    string version = 3;                    // server software version
    string data_center = 4;                // data center this server reports itself in
    string rack = 5;                       // rack this server reports itself in
}
|
|
|
|
|
|
2020-09-13 21:25:51 -07:00
|
|
|
// Request for the volume server to leave the cluster. Empty by design,
// leaving room for future options without an RPC signature change.
message VolumeServerLeaveRequest {
}
|
|
|
|
|
// Response for VolumeServerLeave. Empty by design; future result fields can
// be added without breaking callers.
message VolumeServerLeaveResponse {
}
|
|
|
|
|
|
2021-08-07 14:18:53 -07:00
|
|
|
// remote storage
|
|
|
|
|
// Request to fetch a needle's content from a remote storage location and
// write it into a local volume, optionally fanning out to replicas.
message FetchAndWriteNeedleRequest {
    uint32 volume_id = 1; // destination volume
    uint64 needle_id = 2; // needle key within the volume
    uint32 cookie = 3;    // needle cookie for validation
    int64 offset = 4;     // byte offset within the remote object to read from
    int64 size = 5;       // number of bytes to read

    // A replica volume server that should also receive the written needle.
    message Replica {
        string url = 1;        // server address for internal traffic
        string public_url = 2; // server address advertised to clients
        int32 grpc_port = 3;   // gRPC port of the replica server
    }
    repeated Replica replicas = 6;
    string auth = 7; // authorization token (jwt or similar — confirm against caller)

    // remote conf
    remote_pb.RemoteConf remote_conf = 15;                // remote storage credentials/configuration
    remote_pb.RemoteStorageLocation remote_location = 16; // where the source object lives remotely
}
|
|
|
|
|
// Result of FetchAndWriteNeedle.
message FetchAndWriteNeedleResponse {
    string e_tag = 1; // entity tag of the fetched content
}
|
|
|
|
|
|
2019-11-27 03:09:42 -08:00
|
|
|
// select on volume servers
|
2019-10-02 12:06:03 -07:00
|
|
|
// S3-Select-style query over file contents stored on volume servers.
message QueryRequest {
    repeated string selections = 1;    // columns/fields to project
    repeated string from_file_ids = 2; // file ids to read as the query input

    // A single filter predicate applied to the input records.
    message Filter {
        string field = 1;   // field/column name to test
        string operand = 2; // comparison operator
        string value = 3;   // literal value to compare against
    }
    Filter filter = 3;

    // Describes how the input files are encoded and parsed.
    message InputSerialization {
        // NONE | GZIP | BZIP2
        string compression_type = 1;

        message CSVInput {
            string file_header_info = 1;       // Valid values: NONE | USE | IGNORE
            string record_delimiter = 2;       // Default: \n
            string field_delimiter = 3;        // Default: ,
            string quote_character = 4;        // Default: "
            string quote_escape_character = 5; // Default: "
            string comments = 6;               // Default: #
            // If true, records might contain record delimiters within quote characters
            bool allow_quoted_record_delimiter = 7; // default False.
        }

        message JSONInput {
            string type = 1; // Valid values: DOCUMENT | LINES
        }

        // No options yet; present so parquet input can be selected explicitly.
        message ParquetInput {
        }

        CSVInput csv_input = 2;
        JSONInput json_input = 3;
        ParquetInput parquet_input = 4;
    }
    InputSerialization input_serialization = 4;

    // Describes how query results are encoded in the response.
    message OutputSerialization {
        message CSVOutput {
            string quote_fields = 1;           // Valid values: ALWAYS | ASNEEDED
            string record_delimiter = 2;       // Default: \n
            string field_delimiter = 3;        // Default: ,
            string quote_character = 4;        // Default: "
            string quote_escape_character = 5; // Default: "
        }

        message JSONOutput {
            string record_delimiter = 1; // separator between output JSON records
        }

        CSVOutput csv_output = 2;
        JSONOutput json_output = 3;
    }
    OutputSerialization output_serialization = 5;
}
|
|
|
|
|
// A chunk of serialized query results streamed back to the caller.
message QueriedStripe {
    bytes records = 1; // encoded records, per the requested OutputSerialization
}
|
2020-07-22 15:02:21 -04:00
|
|
|
|
|
|
|
|
// Request to look up the status of one needle within a volume.
message VolumeNeedleStatusRequest {
    uint32 volume_id = 1; // volume to inspect
    uint64 needle_id = 2; // needle key within that volume
}
|
|
|
|
|
// Metadata describing a single stored needle.
message VolumeNeedleStatusResponse {
    uint64 needle_id = 1;     // needle key that was looked up
    uint32 cookie = 2;        // needle cookie
    uint32 size = 3;          // stored size in bytes
    uint64 last_modified = 4; // last modification timestamp (epoch units — confirm seconds vs ns)
    uint32 crc = 5;           // checksum of the needle data
    string ttl = 6;           // time-to-live setting, if any
}
|
2022-04-01 16:37:06 -07:00
|
|
|
|
|
|
|
|
// Request to measure round-trip timing to a target server.
message PingRequest {
    string target = 1;      // default to ping itself
    string target_type = 2; // kind of server being pinged (e.g., master/volume/filer — confirm)
}
|
|
|
|
|
// Timestamps collected during a ping, all in nanoseconds, for estimating
// latency and clock skew.
message PingResponse {
    int64 start_time_ns = 1;  // when the ping was initiated on the caller
    int64 remote_time_ns = 2; // clock reading reported by the pinged server
    int64 stop_time_ns = 3;   // when the response was received by the caller
}
|