syntax = "proto3";
package volume_server_pb;
option go_package = "github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb";
import "remote.proto";
//////////////////////////////////////////////////
service VolumeServer {
// Experts only: takes multiple fid parameters. This function does not propagate deletes to replicas.
rpc BatchDelete (BatchDeleteRequest) returns (BatchDeleteResponse) {
}
rpc VacuumVolumeCheck (VacuumVolumeCheckRequest) returns (VacuumVolumeCheckResponse) {
}
rpc VacuumVolumeCompact (VacuumVolumeCompactRequest) returns (stream VacuumVolumeCompactResponse) {
}
rpc VacuumVolumeCommit (VacuumVolumeCommitRequest) returns (VacuumVolumeCommitResponse) {
}
rpc VacuumVolumeCleanup (VacuumVolumeCleanupRequest) returns (VacuumVolumeCleanupResponse) {
}
rpc DeleteCollection (DeleteCollectionRequest) returns (DeleteCollectionResponse) {
}
rpc AllocateVolume (AllocateVolumeRequest) returns (AllocateVolumeResponse) {
}
rpc VolumeSyncStatus (VolumeSyncStatusRequest) returns (VolumeSyncStatusResponse) {
}
rpc VolumeIncrementalCopy (VolumeIncrementalCopyRequest) returns (stream VolumeIncrementalCopyResponse) {
}
rpc VolumeMount (VolumeMountRequest) returns (VolumeMountResponse) {
}
rpc VolumeUnmount (VolumeUnmountRequest) returns (VolumeUnmountResponse) {
}
rpc VolumeDelete (VolumeDeleteRequest) returns (VolumeDeleteResponse) {
}
rpc VolumeMarkReadonly (VolumeMarkReadonlyRequest) returns (VolumeMarkReadonlyResponse) {
}
rpc VolumeMarkWritable (VolumeMarkWritableRequest) returns (VolumeMarkWritableResponse) {
}
rpc VolumeConfigure (VolumeConfigureRequest) returns (VolumeConfigureResponse) {
}
rpc VolumeStatus (VolumeStatusRequest) returns (VolumeStatusResponse) {
}
// copy the .idx and .dat files, and mount this volume
rpc VolumeCopy (VolumeCopyRequest) returns (stream VolumeCopyResponse) {
}
rpc ReadVolumeFileStatus (ReadVolumeFileStatusRequest) returns (ReadVolumeFileStatusResponse) {
}
rpc CopyFile (CopyFileRequest) returns (stream CopyFileResponse) {
}
rpc ReceiveFile (stream ReceiveFileRequest) returns (ReceiveFileResponse) {
}
rpc ReadNeedleBlob (ReadNeedleBlobRequest) returns (ReadNeedleBlobResponse) {
}
rpc ReadNeedleMeta (ReadNeedleMetaRequest) returns (ReadNeedleMetaResponse) {
}
rpc WriteNeedleBlob (WriteNeedleBlobRequest) returns (WriteNeedleBlobResponse) {
}
rpc ReadAllNeedles (ReadAllNeedlesRequest) returns (stream ReadAllNeedlesResponse) {
}
rpc VolumeTailSender (VolumeTailSenderRequest) returns (stream VolumeTailSenderResponse) {
}
rpc VolumeTailReceiver (VolumeTailReceiverRequest) returns (VolumeTailReceiverResponse) {
}
// erasure coding
rpc VolumeEcShardsGenerate (VolumeEcShardsGenerateRequest) returns (VolumeEcShardsGenerateResponse) {
}
rpc VolumeEcShardsRebuild (VolumeEcShardsRebuildRequest) returns (VolumeEcShardsRebuildResponse) {
}
rpc VolumeEcShardsCopy (VolumeEcShardsCopyRequest) returns (VolumeEcShardsCopyResponse) {
}
rpc VolumeEcShardsDelete (VolumeEcShardsDeleteRequest) returns (VolumeEcShardsDeleteResponse) {
}
rpc VolumeEcShardsMount (VolumeEcShardsMountRequest) returns (VolumeEcShardsMountResponse) {
}
rpc VolumeEcShardsUnmount (VolumeEcShardsUnmountRequest) returns (VolumeEcShardsUnmountResponse) {
}
rpc VolumeEcShardRead (VolumeEcShardReadRequest) returns (stream VolumeEcShardReadResponse) {
}
rpc VolumeEcBlobDelete (VolumeEcBlobDeleteRequest) returns (VolumeEcBlobDeleteResponse) {
}
rpc VolumeEcShardsToVolume (VolumeEcShardsToVolumeRequest) returns (VolumeEcShardsToVolumeResponse) {
}
rpc VolumeEcShardsInfo (VolumeEcShardsInfoRequest) returns (VolumeEcShardsInfoResponse) {
}
// tiered storage
rpc VolumeTierMoveDatToRemote (VolumeTierMoveDatToRemoteRequest) returns (stream VolumeTierMoveDatToRemoteResponse) {
}
rpc VolumeTierMoveDatFromRemote (VolumeTierMoveDatFromRemoteRequest) returns (stream VolumeTierMoveDatFromRemoteResponse) {
}
rpc VolumeServerStatus (VolumeServerStatusRequest) returns (VolumeServerStatusResponse) {
}
rpc VolumeServerLeave (VolumeServerLeaveRequest) returns (VolumeServerLeaveResponse) {
}
// remote storage
rpc FetchAndWriteNeedle (FetchAndWriteNeedleRequest) returns (FetchAndWriteNeedleResponse) {
}
// <experimental> query
rpc Query (QueryRequest) returns (stream QueriedStripe) {
}
rpc VolumeNeedleStatus (VolumeNeedleStatusRequest) returns (VolumeNeedleStatusResponse) {
}
rpc Ping (PingRequest) returns (PingResponse) {
}
}
//////////////////////////////////////////////////
message BatchDeleteRequest {
repeated string file_ids = 1;
bool skip_cookie_check = 2;
}
message BatchDeleteResponse {
repeated DeleteResult results = 1;
}
message DeleteResult {
string file_id = 1;
int32 status = 2;
string error = 3;
uint32 size = 4;
uint32 version = 5;
}
message Empty {
}
message VacuumVolumeCheckRequest {
uint32 volume_id = 1;
}
message VacuumVolumeCheckResponse {
double garbage_ratio = 1;
}
message VacuumVolumeCompactRequest {
uint32 volume_id = 1;
int64 preallocate = 2;
}
message VacuumVolumeCompactResponse {
int64 processed_bytes = 1;
float load_avg_1m = 2;
}
message VacuumVolumeCommitRequest {
uint32 volume_id = 1;
}
message VacuumVolumeCommitResponse {
bool is_read_only = 1;
uint64 volume_size = 2;
}
message VacuumVolumeCleanupRequest {
uint32 volume_id = 1;
}
message VacuumVolumeCleanupResponse {
}
message DeleteCollectionRequest {
string collection = 1;
}
message DeleteCollectionResponse {
}
message AllocateVolumeRequest {
uint32 volume_id = 1;
string collection = 2;
int64 preallocate = 3;
string replication = 4;
string ttl = 5;
uint32 memory_map_max_size_mb = 6;
string disk_type = 7;
uint32 version = 8;
}
message AllocateVolumeResponse {
}
message VolumeSyncStatusRequest {
uint32 volume_id = 1;
}
message VolumeSyncStatusResponse {
uint32 volume_id = 1;
string collection = 2;
string replication = 4;
string ttl = 5;
uint64 tail_offset = 6;
uint32 compact_revision = 7;
uint64 idx_file_size = 8;
uint32 version = 9;
}
message VolumeIncrementalCopyRequest {
uint32 volume_id = 1;
uint64 since_ns = 2;
}
message VolumeIncrementalCopyResponse {
bytes file_content = 1;
}
message VolumeMountRequest {
uint32 volume_id = 1;
}
message VolumeMountResponse {
}
message VolumeUnmountRequest {
uint32 volume_id = 1;
}
message VolumeUnmountResponse {
}
message VolumeDeleteRequest {
uint32 volume_id = 1;
bool only_empty = 2;
}
message VolumeDeleteResponse {
}
message VolumeMarkReadonlyRequest {
uint32 volume_id = 1;
bool persist = 2;
}
message VolumeMarkReadonlyResponse {
}
message VolumeMarkWritableRequest {
uint32 volume_id = 1;
}
message VolumeMarkWritableResponse {
}
message VolumeConfigureRequest {
uint32 volume_id = 1;
string replication = 2;
}
message VolumeConfigureResponse {
string error = 1;
}
message VolumeStatusRequest {
uint32 volume_id = 1;
}
message VolumeStatusResponse {
bool is_read_only = 1;
uint64 volume_size = 2;
uint64 file_count = 3;
uint64 file_deleted_count = 4;
}
message VolumeCopyRequest {
uint32 volume_id = 1;
string collection = 2;
string replication = 3;
string ttl = 4;
string source_data_node = 5;
string disk_type = 6;
int64 io_byte_per_second = 7;
}
message VolumeCopyResponse {
uint64 last_append_at_ns = 1;
int64 processed_bytes = 2;
}
message CopyFileRequest {
uint32 volume_id = 1;
string ext = 2;
uint32 compaction_revision = 3;
uint64 stop_offset = 4;
string collection = 5;
bool is_ec_volume = 6;
bool ignore_source_file_not_found = 7;
}
message CopyFileResponse {
bytes file_content = 1;
int64 modified_ts_ns = 2;
}
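// Note: ReceiveFile is a client-streaming upload; the first ReceiveFileRequest is
// expected to carry ReceiveFileInfo (volume, extension, EC shard id, size), and
// subsequent messages carry file_content chunks until the stream is closed.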
message ReceiveFileRequest {
oneof data {
ReceiveFileInfo info = 1;
bytes file_content = 2;
}
}
message ReceiveFileInfo {
uint32 volume_id = 1;
string ext = 2;
string collection = 3;
bool is_ec_volume = 4;
uint32 shard_id = 5;
uint64 file_size = 6;
}
message ReceiveFileResponse {
uint64 bytes_written = 1;
string error = 2;
}
message ReadNeedleBlobRequest {
uint32 volume_id = 1;
int64 offset = 3; // actual offset
int32 size = 4;
}
message ReadNeedleBlobResponse {
bytes needle_blob = 1;
}
message ReadNeedleMetaRequest {
uint32 volume_id = 1;
uint64 needle_id = 2;
int64 offset = 3; // actual offset
int32 size = 4;
}
message ReadNeedleMetaResponse {
uint32 cookie = 1;
uint64 last_modified = 2;
uint32 crc = 3;
string ttl = 4;
uint64 append_at_ns = 5;
}
message WriteNeedleBlobRequest {
uint32 volume_id = 1;
uint64 needle_id = 2;
int32 size = 3;
bytes needle_blob = 4;
}
message WriteNeedleBlobResponse {
}
message ReadAllNeedlesRequest {
repeated uint32 volume_ids = 1;
}
message ReadAllNeedlesResponse {
uint32 volume_id = 1;
uint64 needle_id = 2;
uint32 cookie = 3;
bytes needle_blob = 5;
bool needle_blob_compressed = 6;
uint64 last_modified = 7;
uint32 crc = 8;
bytes name = 9;
bytes mime = 10;
}
message VolumeTailSenderRequest {
uint32 volume_id = 1;
uint64 since_ns = 2;
uint32 idle_timeout_seconds = 3;
}
message VolumeTailSenderResponse {
bytes needle_header = 1;
bytes needle_body = 2;
bool is_last_chunk = 3;
uint32 version = 4;
}
message VolumeTailReceiverRequest {
uint32 volume_id = 1;
uint64 since_ns = 2;
uint32 idle_timeout_seconds = 3;
string source_volume_server = 4;
}
message VolumeTailReceiverResponse {
}
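// Note: VolumeEcShardsGenerate encodes a volume into EC shard files (.ec00, .ec01, ...)
// on the local volume server; the shard configuration used is recorded in the
// volume's .vif metadata (VolumeInfo).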
message VolumeEcShardsGenerateRequest {
uint32 volume_id = 1;
string collection = 2;
}
message VolumeEcShardsGenerateResponse {
}
message VolumeEcShardsRebuildRequest {
uint32 volume_id = 1;
string collection = 2;
}
message VolumeEcShardsRebuildResponse {
repeated uint32 rebuilt_shard_ids = 1;
}
message VolumeEcShardsCopyRequest {
uint32 volume_id = 1;
string collection = 2;
repeated uint32 shard_ids = 3;
bool copy_ecx_file = 4;
string source_data_node = 5;
bool copy_ecj_file = 6;
bool copy_vif_file = 7;
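// Per the disk-aware EC placement work (#7055), the receiving volume server uses
// disk_id to pick store.Locations[disk_id], so shards land on the planned disk.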
uint32 disk_id = 8; // Target disk ID for storing EC shards
}
message VolumeEcShardsCopyResponse {
}
message VolumeEcShardsDeleteRequest {
uint32 volume_id = 1;
string collection = 2;
repeated uint32 shard_ids = 3;
}
message VolumeEcShardsDeleteResponse {
}
message VolumeEcShardsMountRequest {
uint32 volume_id = 1;
string collection = 2;
repeated uint32 shard_ids = 3;
}
message VolumeEcShardsMountResponse {
}
message VolumeEcShardsUnmountRequest {
uint32 volume_id = 1;
repeated uint32 shard_ids = 3;
}
message VolumeEcShardsUnmountResponse {
}
message VolumeEcShardReadRequest {
uint32 volume_id = 1;
uint32 shard_id = 2;
int64 offset = 3;
int64 size = 4;
uint64 file_key = 5;
}
message VolumeEcShardReadResponse {
bytes data = 1;
bool is_deleted = 2;
}
message VolumeEcBlobDeleteRequest {
uint32 volume_id = 1;
string collection = 2;
uint64 file_key = 3;
uint32 version = 4;
}
message VolumeEcBlobDeleteResponse {
}
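// Note: VolumeEcShardsToVolume rebuilds a normal volume from locally mounted EC shards;
// the data shard count is taken from the EC configuration loaded with the volume
// (from its .vif metadata), falling back to the default 10+4 layout when absent.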
message VolumeEcShardsToVolumeRequest {
uint32 volume_id = 1;
string collection = 2;
}
message VolumeEcShardsToVolumeResponse {
}
message VolumeEcShardsInfoRequest {
uint32 volume_id = 1;
}
message VolumeEcShardsInfoResponse {
repeated EcShardInfo ec_shard_infos = 1;
}
message EcShardInfo {
uint32 shard_id = 1;
int64 size = 2;
string collection = 3;
}
message ReadVolumeFileStatusRequest {
uint32 volume_id = 1;
}
message ReadVolumeFileStatusResponse {
uint32 volume_id = 1;
uint64 idx_file_timestamp_seconds = 2;
uint64 idx_file_size = 3;
uint64 dat_file_timestamp_seconds = 4;
uint64 dat_file_size = 5;
uint64 file_count = 6;
uint32 compaction_revision = 7;
string collection = 8;
string disk_type = 9;
VolumeInfo volume_info = 10;
uint32 version = 11;
}
message DiskStatus {
string dir = 1;
uint64 all = 2;
uint64 used = 3;
uint64 free = 4;
float percent_free = 5;
float percent_used = 6;
string disk_type = 7;
}
message MemStatus {
int32 goroutines = 1;
uint64 all = 2;
uint64 used = 3;
uint64 free = 4;
uint64 self = 5;
uint64 heap = 6;
uint64 stack = 7;
}
// tiered storage on volume servers
message RemoteFile {
string backend_type = 1;
string backend_id = 2;
string key = 3;
uint64 offset = 4;
uint64 file_size = 5;
uint64 modified_time = 6;
string extension = 7;
}
message VolumeInfo {
repeated RemoteFile files = 1;
uint32 version = 2;
string replication = 3;
uint32 bytes_offset = 4;
int64 dat_file_size = 5; // store the original dat file size
uint64 expire_at_sec = 6; // expiration time of ec volume
bool read_only = 7;
EcShardConfig ec_shard_config = 8; // EC shard configuration (optional; unset = use default 10+4)
}
// EcShardConfig specifies erasure coding shard configuration
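// Defaults to 10 data + 4 parity shards when unset; data_shards and parity_shards
// must be positive and their total may not exceed 32 (MaxShardCount, the limit
// imposed by the uint32 ShardBits representation).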
message EcShardConfig {
uint32 data_shards = 1; // Number of data shards (e.g., 10)
uint32 parity_shards = 2; // Number of parity shards (e.g., 4)
}
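// Legacy VolumeInfo layout (note the older field casing); presumably kept so that
// .vif files written by earlier releases can still be parsed.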
message OldVersionVolumeInfo {
repeated RemoteFile files = 1;
uint32 version = 2;
string replication = 3;
uint32 BytesOffset = 4;
int64 dat_file_size = 5; // store the original dat file size
uint64 DestroyTime = 6; // expiration time of ec volume
bool read_only = 7;
}
// tiered storage
message VolumeTierMoveDatToRemoteRequest {
uint32 volume_id = 1;
string collection = 2;
string destination_backend_name = 3;
bool keep_local_dat_file = 4;
}
message VolumeTierMoveDatToRemoteResponse {
int64 processed = 1;
float processedPercentage = 2;
}
message VolumeTierMoveDatFromRemoteRequest {
uint32 volume_id = 1;
string collection = 2;
bool keep_remote_dat_file = 3;
}
message VolumeTierMoveDatFromRemoteResponse {
int64 processed = 1;
float processedPercentage = 2;
}
message VolumeServerStatusRequest {
}
message VolumeServerStatusResponse {
repeated DiskStatus disk_statuses = 1;
MemStatus memory_status = 2;
string version = 3;
string data_center = 4;
string rack = 5;
}
message VolumeServerLeaveRequest {
}
message VolumeServerLeaveResponse {
}
// remote storage
message FetchAndWriteNeedleRequest {
uint32 volume_id = 1;
uint64 needle_id = 2;
uint32 cookie = 3;
int64 offset = 4;
int64 size = 5;
message Replica {
string url = 1;
string public_url = 2;
int32 grpc_port = 3;
}
repeated Replica replicas = 6;
string auth = 7;
// remote conf
remote_pb.RemoteConf remote_conf = 15;
remote_pb.RemoteStorageLocation remote_location = 16;
}
message FetchAndWriteNeedleResponse {
string e_tag = 1;
}
// select on volume servers
message QueryRequest {
repeated string selections = 1;
repeated string from_file_ids = 2;
message Filter {
string field = 1;
string operand = 2;
string value = 3;
}
Filter filter = 3;
message InputSerialization {
// NONE | GZIP | BZIP2
string compression_type = 1;
message CSVInput {
string file_header_info = 1; // Valid values: NONE | USE | IGNORE
string record_delimiter = 2; // Default: \n
string field_delimiter = 3; // Default: ,
string quote_character = 4; // Default: "
string quote_escape_character = 5; // Default: "
string comments = 6; // Default: #
// If true, records might contain record delimiters within quote characters
bool allow_quoted_record_delimiter = 7; // default False.
}
message JSONInput {
string type = 1; // Valid values: DOCUMENT | LINES
}
message ParquetInput {
}
CSVInput csv_input = 2;
JSONInput json_input = 3;
ParquetInput parquet_input = 4;
}
InputSerialization input_serialization = 4;
message OutputSerialization {
message CSVOutput {
string quote_fields = 1; // Valid values: ALWAYS | ASNEEDED
string record_delimiter = 2; // Default: \n
string field_delimiter = 3; // Default: ,
string quote_character = 4; // Default: "
string quote_escape_character = 5; // Default: "
}
message JSONOutput {
string record_delimiter = 1;
}
CSVOutput csv_output = 2;
JSONOutput json_output = 3;
}
OutputSerialization output_serialization = 5;
}
message QueriedStripe {
bytes records = 1;
}
message VolumeNeedleStatusRequest {
uint32 volume_id = 1;
uint64 needle_id = 2;
}
message VolumeNeedleStatusResponse {
uint64 needle_id = 1;
uint32 cookie = 2;
uint32 size = 3;
uint64 last_modified = 4;
uint32 crc = 5;
string ttl = 6;
}
message PingRequest {
string target = 1; // default to ping itself
string target_type = 2;
}
message PingResponse {
int64 start_time_ns = 1;
int64 remote_time_ns = 2;
int64 stop_time_ns = 3;
}