convert error formatting to %w everywhere (#6995)

Chris Lu 2025-07-16 23:39:27 -07:00 committed by GitHub
parent a524b4f485
commit 69553e5ba6
174 changed files with 524 additions and 524 deletions
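
The change switches fmt.Errorf calls from %v to %w wherever the final argument is an error value. With %v the cause survives only as text inside the message; with %w (Go 1.13+) the returned error also keeps the cause in its error chain, so callers can inspect it with errors.Is, errors.As, and errors.Unwrap. A minimal, self-contained sketch of the difference (readConfig is a hypothetical helper, not code from this commit):

package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
)

// readConfig wraps the underlying error with %w, mirroring the pattern
// applied across the files below.
func readConfig(path string) error {
    if _, err := os.ReadFile(path); err != nil {
        return fmt.Errorf("failed to read config file: %w", err) // %v would drop the chain
    }
    return nil
}

func main() {
    err := readConfig("/no/such/file")
    fmt.Println(errors.Is(err, fs.ErrNotExist)) // true with %w, false with %v
    var pathErr *fs.PathError
    fmt.Println(errors.As(err, &pathErr)) // true with %w, false with %v
}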

View File

@ -215,7 +215,7 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) {
})
if err != nil {
return nil, fmt.Errorf("failed to get volume information: %v", err)
return nil, fmt.Errorf("failed to get volume information: %w", err)
}
// Get filer configuration to determine FilerGroup
@ -232,7 +232,7 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) {
})
if err != nil {
return nil, fmt.Errorf("failed to get filer configuration: %v", err)
return nil, fmt.Errorf("failed to get filer configuration: %w", err)
}
// Now list buckets from the filer and match with collection data
@ -330,7 +330,7 @@ func (s *AdminServer) GetS3Buckets() ([]S3Bucket, error) {
})
if err != nil {
return nil, fmt.Errorf("failed to list Object Store buckets: %v", err)
return nil, fmt.Errorf("failed to list Object Store buckets: %w", err)
}
return buckets, nil
@ -355,7 +355,7 @@ func (s *AdminServer) GetBucketDetails(bucketName string) (*BucketDetails, error
Name: bucketName,
})
if err != nil {
return fmt.Errorf("bucket not found: %v", err)
return fmt.Errorf("bucket not found: %w", err)
}
details.Bucket.CreatedAt = time.Unix(bucketResp.Entry.Attributes.Crtime, 0)
@ -488,7 +488,7 @@ func (s *AdminServer) DeleteS3Bucket(bucketName string) error {
IgnoreRecursiveError: false,
})
if err != nil {
return fmt.Errorf("failed to delete bucket: %v", err)
return fmt.Errorf("failed to delete bucket: %w", err)
}
return nil
@ -687,7 +687,7 @@ func (s *AdminServer) GetClusterFilers() (*ClusterFilersData, error) {
})
if err != nil {
return nil, fmt.Errorf("failed to get filer nodes from master: %v", err)
return nil, fmt.Errorf("failed to get filer nodes from master: %w", err)
}
return &ClusterFilersData{
@ -729,7 +729,7 @@ func (s *AdminServer) GetClusterBrokers() (*ClusterBrokersData, error) {
})
if err != nil {
return nil, fmt.Errorf("failed to get broker nodes from master: %v", err)
return nil, fmt.Errorf("failed to get broker nodes from master: %w", err)
}
return &ClusterBrokersData{
@ -1170,7 +1170,7 @@ func (as *AdminServer) getMaintenanceConfig() (*maintenance.MaintenanceConfigDat
func (as *AdminServer) updateMaintenanceConfig(config *maintenance.MaintenanceConfig) error {
// Save configuration to persistent storage
if err := as.configPersistence.SaveMaintenanceConfig(config); err != nil {
return fmt.Errorf("failed to save maintenance configuration: %v", err)
return fmt.Errorf("failed to save maintenance configuration: %w", err)
}
// Update maintenance manager if available
@ -1357,7 +1357,7 @@ func (s *AdminServer) CreateTopicWithRetention(namespace, name string, partition
// Find broker leader to create the topic
brokerLeader, err := s.findBrokerLeader()
if err != nil {
return fmt.Errorf("failed to find broker leader: %v", err)
return fmt.Errorf("failed to find broker leader: %w", err)
}
// Create retention configuration
@ -1391,7 +1391,7 @@ func (s *AdminServer) CreateTopicWithRetention(namespace, name string, partition
})
if err != nil {
return fmt.Errorf("failed to create topic: %v", err)
return fmt.Errorf("failed to create topic: %w", err)
}
glog.V(0).Infof("Created topic %s.%s with %d partitions (retention: enabled=%v, seconds=%d)",
@ -1421,7 +1421,7 @@ func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool,
})
if err != nil {
return fmt.Errorf("failed to get broker nodes from master: %v", err)
return fmt.Errorf("failed to get broker nodes from master: %w", err)
}
if brokerAddress == "" {
@ -1431,7 +1431,7 @@ func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool,
// Create gRPC connection
conn, err := grpc.Dial(brokerAddress, s.grpcDialOption)
if err != nil {
return fmt.Errorf("failed to connect to broker: %v", err)
return fmt.Errorf("failed to connect to broker: %w", err)
}
defer conn.Close()
@ -1448,7 +1448,7 @@ func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool,
},
})
if err != nil {
return fmt.Errorf("failed to get current topic configuration: %v", err)
return fmt.Errorf("failed to get current topic configuration: %w", err)
}
// Create the topic configuration request, preserving all existing settings
@ -1480,7 +1480,7 @@ func (s *AdminServer) UpdateTopicRetention(namespace, name string, enabled bool,
// Send the configuration request with preserved settings
_, err = client.ConfigureTopic(ctx, configRequest)
if err != nil {
return fmt.Errorf("failed to update topic retention: %v", err)
return fmt.Errorf("failed to update topic retention: %w", err)
}
glog.V(0).Infof("Updated topic %s.%s retention (enabled: %v, seconds: %d) while preserving %d partitions",

View File

@ -251,7 +251,7 @@ func (s *AdminServer) SetBucketQuota(bucketName string, quotaBytes int64, quotaE
Name: bucketName,
})
if err != nil {
return fmt.Errorf("bucket not found: %v", err)
return fmt.Errorf("bucket not found: %w", err)
}
bucketEntry := lookupResp.Entry
@ -275,7 +275,7 @@ func (s *AdminServer) SetBucketQuota(bucketName string, quotaBytes int64, quotaE
Entry: bucketEntry,
})
if err != nil {
return fmt.Errorf("failed to update bucket quota: %v", err)
return fmt.Errorf("failed to update bucket quota: %w", err)
}
return nil
@ -308,7 +308,7 @@ func (s *AdminServer) CreateS3BucketWithObjectLock(bucketName string, quotaBytes
})
// Ignore error if directory already exists
if err != nil && !strings.Contains(err.Error(), "already exists") && !strings.Contains(err.Error(), "existing entry") {
return fmt.Errorf("failed to create /buckets directory: %v", err)
return fmt.Errorf("failed to create /buckets directory: %w", err)
}
// Check if bucket already exists
@ -368,7 +368,7 @@ func (s *AdminServer) CreateS3BucketWithObjectLock(bucketName string, quotaBytes
},
})
if err != nil {
return fmt.Errorf("failed to create bucket directory: %v", err)
return fmt.Errorf("failed to create bucket directory: %w", err)
}
return nil

View File

@ -25,7 +25,7 @@ func (s *AdminServer) GetClusterTopology() (*ClusterTopology, error) {
if err != nil {
currentMaster := s.masterClient.GetMaster(context.Background())
glog.Errorf("Failed to connect to master server %s: %v", currentMaster, err)
return nil, fmt.Errorf("gRPC topology request failed: %v", err)
return nil, fmt.Errorf("gRPC topology request failed: %w", err)
}
// Cache the result

View File

@ -40,18 +40,18 @@ func (cp *ConfigPersistence) SaveMaintenanceConfig(config *MaintenanceConfig) er
// Create directory if it doesn't exist
if err := os.MkdirAll(cp.dataDir, ConfigDirPermissions); err != nil {
return fmt.Errorf("failed to create config directory: %v", err)
return fmt.Errorf("failed to create config directory: %w", err)
}
// Marshal configuration to JSON
configData, err := json.MarshalIndent(config, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal maintenance config: %v", err)
return fmt.Errorf("failed to marshal maintenance config: %w", err)
}
// Write to file
if err := os.WriteFile(configPath, configData, ConfigFilePermissions); err != nil {
return fmt.Errorf("failed to write maintenance config file: %v", err)
return fmt.Errorf("failed to write maintenance config file: %w", err)
}
glog.V(1).Infof("Saved maintenance configuration to %s", configPath)
@ -76,13 +76,13 @@ func (cp *ConfigPersistence) LoadMaintenanceConfig() (*MaintenanceConfig, error)
// Read file
configData, err := os.ReadFile(configPath)
if err != nil {
return nil, fmt.Errorf("failed to read maintenance config file: %v", err)
return nil, fmt.Errorf("failed to read maintenance config file: %w", err)
}
// Unmarshal JSON
var config MaintenanceConfig
if err := json.Unmarshal(configData, &config); err != nil {
return nil, fmt.Errorf("failed to unmarshal maintenance config: %v", err)
return nil, fmt.Errorf("failed to unmarshal maintenance config: %w", err)
}
glog.V(1).Infof("Loaded maintenance configuration from %s", configPath)
@ -99,18 +99,18 @@ func (cp *ConfigPersistence) SaveAdminConfig(config map[string]interface{}) erro
// Create directory if it doesn't exist
if err := os.MkdirAll(cp.dataDir, ConfigDirPermissions); err != nil {
return fmt.Errorf("failed to create config directory: %v", err)
return fmt.Errorf("failed to create config directory: %w", err)
}
// Marshal configuration to JSON
configData, err := json.MarshalIndent(config, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal admin config: %v", err)
return fmt.Errorf("failed to marshal admin config: %w", err)
}
// Write to file
if err := os.WriteFile(configPath, configData, ConfigFilePermissions); err != nil {
return fmt.Errorf("failed to write admin config file: %v", err)
return fmt.Errorf("failed to write admin config file: %w", err)
}
glog.V(1).Infof("Saved admin configuration to %s", configPath)
@ -135,13 +135,13 @@ func (cp *ConfigPersistence) LoadAdminConfig() (map[string]interface{}, error) {
// Read file
configData, err := os.ReadFile(configPath)
if err != nil {
return nil, fmt.Errorf("failed to read admin config file: %v", err)
return nil, fmt.Errorf("failed to read admin config file: %w", err)
}
// Unmarshal JSON
var config map[string]interface{}
if err := json.Unmarshal(configData, &config); err != nil {
return nil, fmt.Errorf("failed to unmarshal admin config: %v", err)
return nil, fmt.Errorf("failed to unmarshal admin config: %w", err)
}
glog.V(1).Infof("Loaded admin configuration from %s", configPath)
@ -164,7 +164,7 @@ func (cp *ConfigPersistence) ListConfigFiles() ([]string, error) {
files, err := os.ReadDir(cp.dataDir)
if err != nil {
return nil, fmt.Errorf("failed to read config directory: %v", err)
return nil, fmt.Errorf("failed to read config directory: %w", err)
}
var configFiles []string
@ -196,11 +196,11 @@ func (cp *ConfigPersistence) BackupConfig(filename string) error {
// Copy file
configData, err := os.ReadFile(configPath)
if err != nil {
return fmt.Errorf("failed to read config file: %v", err)
return fmt.Errorf("failed to read config file: %w", err)
}
if err := os.WriteFile(backupPath, configData, ConfigFilePermissions); err != nil {
return fmt.Errorf("failed to create backup: %v", err)
return fmt.Errorf("failed to create backup: %w", err)
}
glog.V(1).Infof("Created backup of %s as %s", filename, backupName)
@ -221,13 +221,13 @@ func (cp *ConfigPersistence) RestoreConfig(filename, backupName string) error {
// Read backup file
backupData, err := os.ReadFile(backupPath)
if err != nil {
return fmt.Errorf("failed to read backup file: %v", err)
return fmt.Errorf("failed to read backup file: %w", err)
}
// Write to config file
configPath := filepath.Join(cp.dataDir, filename)
if err := os.WriteFile(configPath, backupData, ConfigFilePermissions); err != nil {
return fmt.Errorf("failed to restore config: %v", err)
return fmt.Errorf("failed to restore config: %w", err)
}
glog.V(1).Infof("Restored %s from backup %s", filename, backupName)

View File

@ -154,7 +154,7 @@ func (s *AdminServer) GetTopicDetails(namespace, topicName string) (*TopicDetail
// Find broker leader
brokerLeader, err := s.findBrokerLeader()
if err != nil {
return nil, fmt.Errorf("failed to find broker leader: %v", err)
return nil, fmt.Errorf("failed to find broker leader: %w", err)
}
var topicDetails *TopicDetailsData
@ -172,7 +172,7 @@ func (s *AdminServer) GetTopicDetails(namespace, topicName string) (*TopicDetail
},
})
if err != nil {
return fmt.Errorf("failed to get topic configuration: %v", err)
return fmt.Errorf("failed to get topic configuration: %w", err)
}
// Initialize topic details
@ -297,7 +297,7 @@ func (s *AdminServer) GetConsumerGroupOffsets(namespace, topicName string) ([]Co
if err == io.EOF {
break
}
return fmt.Errorf("failed to receive version entries: %v", err)
return fmt.Errorf("failed to receive version entries: %w", err)
}
// Only process directories that are versions (start with "v")
@ -398,7 +398,7 @@ func (s *AdminServer) GetConsumerGroupOffsets(namespace, topicName string) ([]Co
})
if err != nil {
return nil, fmt.Errorf("failed to get consumer group offsets: %v", err)
return nil, fmt.Errorf("failed to get consumer group offsets: %w", err)
}
return offsets, nil
@ -544,7 +544,7 @@ func (s *AdminServer) findBrokerLeader() (string, error) {
})
if err != nil {
return "", fmt.Errorf("failed to list brokers: %v", err)
return "", fmt.Errorf("failed to list brokers: %w", err)
}
if len(brokers) == 0 {

View File

@ -34,7 +34,7 @@ func (p *TopicRetentionPurger) PurgeExpiredTopicData() error {
// Get all topics with retention enabled
topics, err := p.getTopicsWithRetention()
if err != nil {
return fmt.Errorf("failed to get topics with retention: %v", err)
return fmt.Errorf("failed to get topics with retention: %w", err)
}
glog.V(1).Infof("Found %d topics with retention enabled", len(topics))
@ -67,7 +67,7 @@ func (p *TopicRetentionPurger) getTopicsWithRetention() ([]TopicRetentionConfig,
// Find broker leader to get topics
brokerLeader, err := p.adminServer.findBrokerLeader()
if err != nil {
return nil, fmt.Errorf("failed to find broker leader: %v", err)
return nil, fmt.Errorf("failed to find broker leader: %w", err)
}
// Get all topics from the broker
@ -147,7 +147,7 @@ func (p *TopicRetentionPurger) purgeTopicData(topicRetention TopicRetentionConfi
if err == io.EOF {
break
}
return fmt.Errorf("failed to receive version entries: %v", err)
return fmt.Errorf("failed to receive version entries: %w", err)
}
// Only process directories that are versions (start with "v")
@ -257,7 +257,7 @@ func (p *TopicRetentionPurger) deleteDirectoryRecursively(client filer_pb.Seawee
if err == io.EOF {
break
}
return fmt.Errorf("failed to receive entries: %v", err)
return fmt.Errorf("failed to receive entries: %w", err)
}
entryPath := filepath.Join(dirPath, resp.Entry.Name)

View File

@ -53,7 +53,7 @@ func (s *AdminServer) CreateObjectStoreUser(req CreateUserRequest) (*ObjectStore
if err == credential.ErrUserAlreadyExists {
return nil, fmt.Errorf("user %s already exists", req.Username)
}
return nil, fmt.Errorf("failed to create user: %v", err)
return nil, fmt.Errorf("failed to create user: %w", err)
}
// Return created user
@ -82,7 +82,7 @@ func (s *AdminServer) UpdateObjectStoreUser(username string, req UpdateUserReque
if err == credential.ErrUserNotFound {
return nil, fmt.Errorf("user %s not found", username)
}
return nil, fmt.Errorf("failed to get user: %v", err)
return nil, fmt.Errorf("failed to get user: %w", err)
}
// Create updated identity
@ -112,7 +112,7 @@ func (s *AdminServer) UpdateObjectStoreUser(username string, req UpdateUserReque
// Update user using credential manager
err = s.credentialManager.UpdateUser(ctx, username, updatedIdentity)
if err != nil {
return nil, fmt.Errorf("failed to update user: %v", err)
return nil, fmt.Errorf("failed to update user: %w", err)
}
// Return updated user
@ -145,7 +145,7 @@ func (s *AdminServer) DeleteObjectStoreUser(username string) error {
if err == credential.ErrUserNotFound {
return fmt.Errorf("user %s not found", username)
}
return fmt.Errorf("failed to delete user: %v", err)
return fmt.Errorf("failed to delete user: %w", err)
}
return nil
@ -165,7 +165,7 @@ func (s *AdminServer) GetObjectStoreUserDetails(username string) (*UserDetails,
if err == credential.ErrUserNotFound {
return nil, fmt.Errorf("user %s not found", username)
}
return nil, fmt.Errorf("failed to get user: %v", err)
return nil, fmt.Errorf("failed to get user: %w", err)
}
details := &UserDetails{
@ -204,7 +204,7 @@ func (s *AdminServer) CreateAccessKey(username string) (*AccessKeyInfo, error) {
if err == credential.ErrUserNotFound {
return nil, fmt.Errorf("user %s not found", username)
}
return nil, fmt.Errorf("failed to get user: %v", err)
return nil, fmt.Errorf("failed to get user: %w", err)
}
// Generate new access key
@ -219,7 +219,7 @@ func (s *AdminServer) CreateAccessKey(username string) (*AccessKeyInfo, error) {
// Create access key using credential manager
err = s.credentialManager.CreateAccessKey(ctx, username, credential)
if err != nil {
return nil, fmt.Errorf("failed to create access key: %v", err)
return nil, fmt.Errorf("failed to create access key: %w", err)
}
return &AccessKeyInfo{
@ -246,7 +246,7 @@ func (s *AdminServer) DeleteAccessKey(username, accessKeyId string) error {
if err == credential.ErrAccessKeyNotFound {
return fmt.Errorf("access key %s not found for user %s", accessKeyId, username)
}
return fmt.Errorf("failed to delete access key: %v", err)
return fmt.Errorf("failed to delete access key: %w", err)
}
return nil
@ -266,7 +266,7 @@ func (s *AdminServer) GetUserPolicies(username string) ([]string, error) {
if err == credential.ErrUserNotFound {
return nil, fmt.Errorf("user %s not found", username)
}
return nil, fmt.Errorf("failed to get user: %v", err)
return nil, fmt.Errorf("failed to get user: %w", err)
}
return identity.Actions, nil
@ -286,7 +286,7 @@ func (s *AdminServer) UpdateUserPolicies(username string, actions []string) erro
if err == credential.ErrUserNotFound {
return fmt.Errorf("user %s not found", username)
}
return fmt.Errorf("failed to get user: %v", err)
return fmt.Errorf("failed to get user: %w", err)
}
// Create updated identity with new actions
@ -300,7 +300,7 @@ func (s *AdminServer) UpdateUserPolicies(username string, actions []string) erro
// Update user using credential manager
err = s.credentialManager.UpdateUser(ctx, username, updatedIdentity)
if err != nil {
return fmt.Errorf("failed to update user policies: %v", err)
return fmt.Errorf("failed to update user policies: %w", err)
}
return nil

View File

@ -133,7 +133,7 @@ func (s *WorkerGrpcServer) WorkerStream(stream worker_pb.WorkerService_WorkerStr
// Wait for initial registration message
msg, err := stream.Recv()
if err != nil {
return fmt.Errorf("failed to receive registration message: %v", err)
return fmt.Errorf("failed to receive registration message: %w", err)
}
registration := msg.GetRegistration()

View File

@ -307,19 +307,19 @@ func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *mul
// Validate and sanitize the filer address
if err := h.validateFilerAddress(filerAddress); err != nil {
return fmt.Errorf("invalid filer address: %v", err)
return fmt.Errorf("invalid filer address: %w", err)
}
// Validate and sanitize the file path
cleanFilePath, err := h.validateAndCleanFilePath(filePath)
if err != nil {
return fmt.Errorf("invalid file path: %v", err)
return fmt.Errorf("invalid file path: %w", err)
}
// Open the file
file, err := fileHeader.Open()
if err != nil {
return fmt.Errorf("failed to open file: %v", err)
return fmt.Errorf("failed to open file: %w", err)
}
defer file.Close()
@ -330,19 +330,19 @@ func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *mul
// Create form file field
part, err := writer.CreateFormFile("file", fileHeader.Filename)
if err != nil {
return fmt.Errorf("failed to create form file: %v", err)
return fmt.Errorf("failed to create form file: %w", err)
}
// Copy file content to form
_, err = io.Copy(part, file)
if err != nil {
return fmt.Errorf("failed to copy file content: %v", err)
return fmt.Errorf("failed to copy file content: %w", err)
}
// Close the writer to finalize the form
err = writer.Close()
if err != nil {
return fmt.Errorf("failed to close multipart writer: %v", err)
return fmt.Errorf("failed to close multipart writer: %w", err)
}
// Create the upload URL with validated components
@ -351,7 +351,7 @@ func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *mul
// Create HTTP request
req, err := http.NewRequest("POST", uploadURL, &body)
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
return fmt.Errorf("failed to create request: %w", err)
}
// Set content type with boundary
@ -361,7 +361,7 @@ func (h *FileBrowserHandlers) uploadFileToFiler(filePath string, fileHeader *mul
client := &http.Client{Timeout: 60 * time.Second} // Increased timeout for larger files
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("failed to upload file: %v", err)
return fmt.Errorf("failed to upload file: %w", err)
}
defer resp.Body.Close()
@ -383,7 +383,7 @@ func (h *FileBrowserHandlers) validateFilerAddress(address string) error {
// Parse the address to validate it's a proper host:port format
host, port, err := net.SplitHostPort(address)
if err != nil {
return fmt.Errorf("invalid address format: %v", err)
return fmt.Errorf("invalid address format: %w", err)
}
// Validate host is not empty
@ -398,7 +398,7 @@ func (h *FileBrowserHandlers) validateFilerAddress(address string) error {
portNum, err := strconv.Atoi(port)
if err != nil {
return fmt.Errorf("invalid port number: %v", err)
return fmt.Errorf("invalid port number: %w", err)
}
if portNum < 1 || portNum > 65535 {

View File

@ -53,7 +53,7 @@ func (mm *MaintenanceManager) Start() error {
// Validate configuration durations to prevent ticker panics
if err := mm.validateConfig(); err != nil {
return fmt.Errorf("invalid maintenance configuration: %v", err)
return fmt.Errorf("invalid maintenance configuration: %w", err)
}
mm.running = true

View File

@ -35,7 +35,7 @@ func (ms *MaintenanceScanner) ScanForMaintenanceTasks() ([]*TaskDetectionResult,
// Get volume health metrics
volumeMetrics, err := ms.getVolumeHealthMetrics()
if err != nil {
return nil, fmt.Errorf("failed to get volume health metrics: %v", err)
return nil, fmt.Errorf("failed to get volume health metrics: %w", err)
}
// Use task system for all task types

View File

@ -159,7 +159,7 @@ func (mws *MaintenanceWorkerService) executeGenericTask(task *MaintenanceTask) e
// Create task instance using the registry
taskInstance, err := mws.taskRegistry.CreateTask(taskType, taskParams)
if err != nil {
return fmt.Errorf("failed to create task instance: %v", err)
return fmt.Errorf("failed to create task instance: %w", err)
}
// Update progress to show task has started
@ -168,7 +168,7 @@ func (mws *MaintenanceWorkerService) executeGenericTask(task *MaintenanceTask) e
// Execute the task
err = taskInstance.Execute(taskParams)
if err != nil {
return fmt.Errorf("task execution failed: %v", err)
return fmt.Errorf("task execution failed: %w", err)
}
// Update progress to show completion
@ -405,7 +405,7 @@ func (mwc *MaintenanceWorkerCommand) Run() error {
// Start the worker service
err := mwc.workerService.Start()
if err != nil {
return fmt.Errorf("failed to start maintenance worker: %v", err)
return fmt.Errorf("failed to start maintenance worker: %w", err)
}
// Wait for interrupt signal

View File

@ -186,7 +186,7 @@ func startAdminServer(ctx context.Context, options AdminOptions) error {
sessionKeyBytes := make([]byte, 32)
_, err := rand.Read(sessionKeyBytes)
if err != nil {
return fmt.Errorf("failed to generate session key: %v", err)
return fmt.Errorf("failed to generate session key: %w", err)
}
store := cookie.NewStore(sessionKeyBytes)
r.Use(sessions.Sessions("admin-session", store))
@ -234,7 +234,7 @@ func startAdminServer(ctx context.Context, options AdminOptions) error {
// Start worker gRPC server for worker connections
err = adminServer.StartWorkerGrpcServer(*options.port)
if err != nil {
return fmt.Errorf("failed to start worker gRPC server: %v", err)
return fmt.Errorf("failed to start worker gRPC server: %w", err)
}
// Set up cleanup for gRPC server
@ -304,7 +304,7 @@ func startAdminServer(ctx context.Context, options AdminOptions) error {
defer cancel()
if err := server.Shutdown(shutdownCtx); err != nil {
return fmt.Errorf("admin server forced to shutdown: %v", err)
return fmt.Errorf("admin server forced to shutdown: %w", err)
}
return nil
@ -328,7 +328,7 @@ func expandHomeDir(path string) (string, error) {
// Get current user
currentUser, err := user.Current()
if err != nil {
return "", fmt.Errorf("failed to get current user: %v", err)
return "", fmt.Errorf("failed to get current user: %w", err)
}
// Handle different tilde patterns

View File

@ -268,7 +268,7 @@ func (worker *FileCopyWorker) doEachCopy(task FileCopyTask) error {
}
if shouldCopy, err := worker.checkExistingFileFirst(task, f); err != nil {
return fmt.Errorf("check existing file: %v", err)
return fmt.Errorf("check existing file: %w", err)
} else if !shouldCopy {
if *worker.options.verbose {
fmt.Printf("skipping copied file: %v\n", f.Name())
@ -395,7 +395,7 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
}
if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
return fmt.Errorf("update fh: %w", err)
}
return nil
}); err != nil {
@ -428,7 +428,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
uploader, err := operation.NewUploader()
if err != nil {
uploadError = fmt.Errorf("upload data %v: %v\n", fileName, err)
uploadError = fmt.Errorf("upload data %v: %w\n", fileName, err)
return
}
@ -456,7 +456,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
)
if err != nil {
uploadError = fmt.Errorf("upload data %v: %v\n", fileName, err)
uploadError = fmt.Errorf("upload data %v: %w\n", fileName, err)
return
}
if uploadResult.Error != "" {
@ -489,7 +489,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
manifestedChunks, manifestErr := filer.MaybeManifestize(worker.saveDataAsChunk, chunks)
if manifestErr != nil {
return fmt.Errorf("create manifest: %v", manifestErr)
return fmt.Errorf("create manifest: %w", manifestErr)
}
if err := pb.WithGrpcFilerClient(false, worker.signature, worker.filerAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
@ -512,7 +512,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
}
if err := filer_pb.CreateEntry(context.Background(), client, request); err != nil {
return fmt.Errorf("update fh: %v", err)
return fmt.Errorf("update fh: %w", err)
}
return nil
}); err != nil {
@ -546,7 +546,7 @@ func detectMimeType(f *os.File) string {
func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, offset int64, tsNs int64) (chunk *filer_pb.FileChunk, err error) {
uploader, uploaderErr := operation.NewUploader()
if uploaderErr != nil {
return nil, fmt.Errorf("upload data: %v", uploaderErr)
return nil, fmt.Errorf("upload data: %w", uploaderErr)
}
finalFileId, uploadResult, flushErr, _ := uploader.UploadWithRetry(
@ -573,7 +573,7 @@ func (worker *FileCopyWorker) saveDataAsChunk(reader io.Reader, name string, off
)
if flushErr != nil {
return nil, fmt.Errorf("upload data: %v", flushErr)
return nil, fmt.Errorf("upload data: %w", flushErr)
}
if uploadResult.Error != "" {
return nil, fmt.Errorf("upload result: %v", uploadResult.Error)

View File

@ -133,14 +133,14 @@ func (metaBackup *FilerMetaBackupOptions) traverseMetadata() (err error) {
println("+", parentPath.Child(entry.Name))
if err := metaBackup.store.InsertEntry(context.Background(), filer.FromPbEntry(string(parentPath), entry)); err != nil {
saveErr = fmt.Errorf("insert entry error: %v\n", err)
saveErr = fmt.Errorf("insert entry error: %w\n", err)
return
}
})
if traverseErr != nil {
return fmt.Errorf("traverse: %v", traverseErr)
return fmt.Errorf("traverse: %w", traverseErr)
}
return saveErr
}

View File

@ -23,7 +23,7 @@ func (option *RemoteGatewayOptions) followBucketUpdatesAndUploadToRemote(filerSo
// read filer remote storage mount mappings
if detectErr := option.collectRemoteStorageConf(); detectErr != nil {
return fmt.Errorf("read mount info: %v", detectErr)
return fmt.Errorf("read mount info: %w", detectErr)
}
eachEntryFunc, err := option.makeBucketedEventProcessor(filerSource)
@ -168,7 +168,7 @@ func (option *RemoteGatewayOptions) makeBucketedEventProcessor(filerSource *sour
if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {
newMappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)
if readErr != nil {
return fmt.Errorf("unmarshal mappings: %v", readErr)
return fmt.Errorf("unmarshal mappings: %w", readErr)
}
option.mappings = newMappings
}

View File

@ -25,7 +25,7 @@ func followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *sour
// read filer remote storage mount mappings
_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, pb.ServerAddress(*option.filerAddress), mountedDir)
if detectErr != nil {
return fmt.Errorf("read mount info: %v", detectErr)
return fmt.Errorf("read mount info: %w", detectErr)
}
eachEntryFunc, err := option.makeEventProcessor(remoteStorage, mountedDir, remoteStorageMountLocation, filerSource)
@ -99,7 +99,7 @@ func (option *RemoteSyncOptions) makeEventProcessor(remoteStorage *remote_pb.Rem
if message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {
mappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)
if readErr != nil {
return fmt.Errorf("unmarshal mappings: %v", readErr)
return fmt.Errorf("unmarshal mappings: %w", readErr)
}
if remoteLoc, found := mappings.Mappings[mountedDir]; found {
if remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path {

View File

@ -170,7 +170,7 @@ func doFixOneVolume(basepath string, baseFileName string, collection string, vol
}
if err := storage.ScanVolumeFile(basepath, collection, vid, storage.NeedleMapInMemory, scanner); err != nil {
err := fmt.Errorf("scan .dat File: %v", err)
err := fmt.Errorf("scan .dat File: %w", err)
if *fixIgnoreError {
glog.Error(err)
} else {
@ -179,7 +179,7 @@ func doFixOneVolume(basepath string, baseFileName string, collection string, vol
}
if err := SaveToIdx(scanner, indexFileName); err != nil {
err := fmt.Errorf("save to .idx File: %v", err)
err := fmt.Errorf("save to .idx File: %w", err)
if *fixIgnoreError {
glog.Error(err)
} else {

View File

@ -92,7 +92,7 @@ func startMasterFollower(masterOptions MasterOptions) {
err = pb.WithOneOfGrpcMasterClients(false, masters, grpcDialOption, func(client master_pb.SeaweedClient) error {
resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
if err != nil {
return fmt.Errorf("get master grpc address %v configuration: %v", masters, err)
return fmt.Errorf("get master grpc address %v configuration: %w", masters, err)
}
masterOptions.defaultReplication = &resp.DefaultReplication
masterOptions.volumeSizeLimitMB = aws.Uint(uint(resp.VolumeSizeLimitMB))

View File

@ -78,7 +78,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
err = pb.WithOneOfGrpcFilerClients(false, filerAddresses, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
if err != nil {
return fmt.Errorf("get filer grpc address %v configuration: %v", filerAddresses, err)
return fmt.Errorf("get filer grpc address %v configuration: %w", filerAddresses, err)
}
cipher = resp.Cipher
return nil

View File

@ -34,7 +34,7 @@ func (store *FilerEtcStore) SaveConfiguration(ctx context.Context, config *iam_p
return store.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
var buf bytes.Buffer
if err := filer.ProtoToText(&buf, config); err != nil {
return fmt.Errorf("failed to marshal configuration: %v", err)
return fmt.Errorf("failed to marshal configuration: %w", err)
}
return filer.SaveInsideFiler(client, filer.IamConfigDirectory, filer.IamIdentityFile, buf.Bytes())
})
@ -44,7 +44,7 @@ func (store *FilerEtcStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
// Load existing configuration
config, err := store.LoadConfiguration(ctx)
if err != nil {
return fmt.Errorf("failed to load configuration: %v", err)
return fmt.Errorf("failed to load configuration: %w", err)
}
// Check if user already exists
@ -64,7 +64,7 @@ func (store *FilerEtcStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
func (store *FilerEtcStore) GetUser(ctx context.Context, username string) (*iam_pb.Identity, error) {
config, err := store.LoadConfiguration(ctx)
if err != nil {
return nil, fmt.Errorf("failed to load configuration: %v", err)
return nil, fmt.Errorf("failed to load configuration: %w", err)
}
for _, identity := range config.Identities {
@ -79,7 +79,7 @@ func (store *FilerEtcStore) GetUser(ctx context.Context, username string) (*iam_
func (store *FilerEtcStore) UpdateUser(ctx context.Context, username string, identity *iam_pb.Identity) error {
config, err := store.LoadConfiguration(ctx)
if err != nil {
return fmt.Errorf("failed to load configuration: %v", err)
return fmt.Errorf("failed to load configuration: %w", err)
}
// Find and update the user
@ -96,7 +96,7 @@ func (store *FilerEtcStore) UpdateUser(ctx context.Context, username string, ide
func (store *FilerEtcStore) DeleteUser(ctx context.Context, username string) error {
config, err := store.LoadConfiguration(ctx)
if err != nil {
return fmt.Errorf("failed to load configuration: %v", err)
return fmt.Errorf("failed to load configuration: %w", err)
}
// Find and remove the user
@ -113,7 +113,7 @@ func (store *FilerEtcStore) DeleteUser(ctx context.Context, username string) err
func (store *FilerEtcStore) ListUsers(ctx context.Context) ([]string, error) {
config, err := store.LoadConfiguration(ctx)
if err != nil {
return nil, fmt.Errorf("failed to load configuration: %v", err)
return nil, fmt.Errorf("failed to load configuration: %w", err)
}
var usernames []string
@ -127,7 +127,7 @@ func (store *FilerEtcStore) ListUsers(ctx context.Context) ([]string, error) {
func (store *FilerEtcStore) GetUserByAccessKey(ctx context.Context, accessKey string) (*iam_pb.Identity, error) {
config, err := store.LoadConfiguration(ctx)
if err != nil {
return nil, fmt.Errorf("failed to load configuration: %v", err)
return nil, fmt.Errorf("failed to load configuration: %w", err)
}
for _, identity := range config.Identities {
@ -144,7 +144,7 @@ func (store *FilerEtcStore) GetUserByAccessKey(ctx context.Context, accessKey st
func (store *FilerEtcStore) CreateAccessKey(ctx context.Context, username string, cred *iam_pb.Credential) error {
config, err := store.LoadConfiguration(ctx)
if err != nil {
return fmt.Errorf("failed to load configuration: %v", err)
return fmt.Errorf("failed to load configuration: %w", err)
}
// Find the user and add the credential
@ -168,7 +168,7 @@ func (store *FilerEtcStore) CreateAccessKey(ctx context.Context, username string
func (store *FilerEtcStore) DeleteAccessKey(ctx context.Context, username string, accessKey string) error {
config, err := store.LoadConfiguration(ctx)
if err != nil {
return fmt.Errorf("failed to load configuration: %v", err)
return fmt.Errorf("failed to load configuration: %w", err)
}
// Find the user and remove the credential

View File

@ -31,7 +31,7 @@ func MigrateCredentials(fromStoreName, toStoreName CredentialStoreTypeName, conf
glog.Infof("Loading configuration from %s store...", fromStoreName)
config, err := fromCM.LoadConfiguration(ctx)
if err != nil {
return fmt.Errorf("failed to load configuration from source store: %v", err)
return fmt.Errorf("failed to load configuration from source store: %w", err)
}
if config == nil || len(config.Identities) == 0 {
@ -94,7 +94,7 @@ func ExportCredentials(storeName CredentialStoreTypeName, configuration util.Con
// Load configuration
config, err := cm.LoadConfiguration(ctx)
if err != nil {
return nil, fmt.Errorf("failed to load configuration: %v", err)
return nil, fmt.Errorf("failed to load configuration: %w", err)
}
return config, nil
@ -164,7 +164,7 @@ func ValidateCredentials(storeName CredentialStoreTypeName, configuration util.C
// Load configuration
config, err := cm.LoadConfiguration(ctx)
if err != nil {
return fmt.Errorf("failed to load configuration: %v", err)
return fmt.Errorf("failed to load configuration: %w", err)
}
if config == nil || len(config.Identities) == 0 {

View File

@ -20,7 +20,7 @@ func (store *PostgresStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3Ap
// Query all users
rows, err := store.db.QueryContext(ctx, "SELECT username, email, account_data, actions FROM users")
if err != nil {
return nil, fmt.Errorf("failed to query users: %v", err)
return nil, fmt.Errorf("failed to query users: %w", err)
}
defer rows.Close()
@ -29,7 +29,7 @@ func (store *PostgresStore) LoadConfiguration(ctx context.Context) (*iam_pb.S3Ap
var accountDataJSON, actionsJSON []byte
if err := rows.Scan(&username, &email, &accountDataJSON, &actionsJSON); err != nil {
return nil, fmt.Errorf("failed to scan user row: %v", err)
return nil, fmt.Errorf("failed to scan user row: %w", err)
}
identity := &iam_pb.Identity{
@ -84,16 +84,16 @@ func (store *PostgresStore) SaveConfiguration(ctx context.Context, config *iam_p
// Start transaction
tx, err := store.db.BeginTx(ctx, nil)
if err != nil {
return fmt.Errorf("failed to begin transaction: %v", err)
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer tx.Rollback()
// Clear existing data
if _, err := tx.ExecContext(ctx, "DELETE FROM credentials"); err != nil {
return fmt.Errorf("failed to clear credentials: %v", err)
return fmt.Errorf("failed to clear credentials: %w", err)
}
if _, err := tx.ExecContext(ctx, "DELETE FROM users"); err != nil {
return fmt.Errorf("failed to clear users: %v", err)
return fmt.Errorf("failed to clear users: %w", err)
}
// Insert all identities
@ -147,7 +147,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
var count int
err := store.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", identity.Name).Scan(&count)
if err != nil {
return fmt.Errorf("failed to check user existence: %v", err)
return fmt.Errorf("failed to check user existence: %w", err)
}
if count > 0 {
return credential.ErrUserAlreadyExists
@ -156,7 +156,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
// Start transaction
tx, err := store.db.BeginTx(ctx, nil)
if err != nil {
return fmt.Errorf("failed to begin transaction: %v", err)
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer tx.Rollback()
@ -165,7 +165,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
if identity.Account != nil {
accountDataJSON, err = json.Marshal(identity.Account)
if err != nil {
return fmt.Errorf("failed to marshal account data: %v", err)
return fmt.Errorf("failed to marshal account data: %w", err)
}
}
@ -174,7 +174,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
if identity.Actions != nil {
actionsJSON, err = json.Marshal(identity.Actions)
if err != nil {
return fmt.Errorf("failed to marshal actions: %v", err)
return fmt.Errorf("failed to marshal actions: %w", err)
}
}
@ -183,7 +183,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
"INSERT INTO users (username, email, account_data, actions) VALUES ($1, $2, $3, $4)",
identity.Name, "", accountDataJSON, actionsJSON)
if err != nil {
return fmt.Errorf("failed to insert user: %v", err)
return fmt.Errorf("failed to insert user: %w", err)
}
// Insert credentials
@ -192,7 +192,7 @@ func (store *PostgresStore) CreateUser(ctx context.Context, identity *iam_pb.Ide
"INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)",
identity.Name, cred.AccessKey, cred.SecretKey)
if err != nil {
return fmt.Errorf("failed to insert credential: %v", err)
return fmt.Errorf("failed to insert credential: %w", err)
}
}
@ -214,7 +214,7 @@ func (store *PostgresStore) GetUser(ctx context.Context, username string) (*iam_
if err == sql.ErrNoRows {
return nil, credential.ErrUserNotFound
}
return nil, fmt.Errorf("failed to query user: %v", err)
return nil, fmt.Errorf("failed to query user: %w", err)
}
identity := &iam_pb.Identity{
@ -224,28 +224,28 @@ func (store *PostgresStore) GetUser(ctx context.Context, username string) (*iam_
// Parse account data
if len(accountDataJSON) > 0 {
if err := json.Unmarshal(accountDataJSON, &identity.Account); err != nil {
return nil, fmt.Errorf("failed to unmarshal account data: %v", err)
return nil, fmt.Errorf("failed to unmarshal account data: %w", err)
}
}
// Parse actions
if len(actionsJSON) > 0 {
if err := json.Unmarshal(actionsJSON, &identity.Actions); err != nil {
return nil, fmt.Errorf("failed to unmarshal actions: %v", err)
return nil, fmt.Errorf("failed to unmarshal actions: %w", err)
}
}
// Query credentials
rows, err := store.db.QueryContext(ctx, "SELECT access_key, secret_key FROM credentials WHERE username = $1", username)
if err != nil {
return nil, fmt.Errorf("failed to query credentials: %v", err)
return nil, fmt.Errorf("failed to query credentials: %w", err)
}
defer rows.Close()
for rows.Next() {
var accessKey, secretKey string
if err := rows.Scan(&accessKey, &secretKey); err != nil {
return nil, fmt.Errorf("failed to scan credential: %v", err)
return nil, fmt.Errorf("failed to scan credential: %w", err)
}
identity.Credentials = append(identity.Credentials, &iam_pb.Credential{
@ -265,7 +265,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
// Start transaction
tx, err := store.db.BeginTx(ctx, nil)
if err != nil {
return fmt.Errorf("failed to begin transaction: %v", err)
return fmt.Errorf("failed to begin transaction: %w", err)
}
defer tx.Rollback()
@ -273,7 +273,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
var count int
err = tx.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", username).Scan(&count)
if err != nil {
return fmt.Errorf("failed to check user existence: %v", err)
return fmt.Errorf("failed to check user existence: %w", err)
}
if count == 0 {
return credential.ErrUserNotFound
@ -284,7 +284,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
if identity.Account != nil {
accountDataJSON, err = json.Marshal(identity.Account)
if err != nil {
return fmt.Errorf("failed to marshal account data: %v", err)
return fmt.Errorf("failed to marshal account data: %w", err)
}
}
@ -293,7 +293,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
if identity.Actions != nil {
actionsJSON, err = json.Marshal(identity.Actions)
if err != nil {
return fmt.Errorf("failed to marshal actions: %v", err)
return fmt.Errorf("failed to marshal actions: %w", err)
}
}
@ -302,13 +302,13 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
"UPDATE users SET email = $2, account_data = $3, actions = $4, updated_at = CURRENT_TIMESTAMP WHERE username = $1",
username, "", accountDataJSON, actionsJSON)
if err != nil {
return fmt.Errorf("failed to update user: %v", err)
return fmt.Errorf("failed to update user: %w", err)
}
// Delete existing credentials
_, err = tx.ExecContext(ctx, "DELETE FROM credentials WHERE username = $1", username)
if err != nil {
return fmt.Errorf("failed to delete existing credentials: %v", err)
return fmt.Errorf("failed to delete existing credentials: %w", err)
}
// Insert new credentials
@ -317,7 +317,7 @@ func (store *PostgresStore) UpdateUser(ctx context.Context, username string, ide
"INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)",
username, cred.AccessKey, cred.SecretKey)
if err != nil {
return fmt.Errorf("failed to insert credential: %v", err)
return fmt.Errorf("failed to insert credential: %w", err)
}
}
@ -331,12 +331,12 @@ func (store *PostgresStore) DeleteUser(ctx context.Context, username string) err
result, err := store.db.ExecContext(ctx, "DELETE FROM users WHERE username = $1", username)
if err != nil {
return fmt.Errorf("failed to delete user: %v", err)
return fmt.Errorf("failed to delete user: %w", err)
}
rowsAffected, err := result.RowsAffected()
if err != nil {
return fmt.Errorf("failed to get rows affected: %v", err)
return fmt.Errorf("failed to get rows affected: %w", err)
}
if rowsAffected == 0 {
@ -353,7 +353,7 @@ func (store *PostgresStore) ListUsers(ctx context.Context) ([]string, error) {
rows, err := store.db.QueryContext(ctx, "SELECT username FROM users ORDER BY username")
if err != nil {
return nil, fmt.Errorf("failed to query users: %v", err)
return nil, fmt.Errorf("failed to query users: %w", err)
}
defer rows.Close()
@ -361,7 +361,7 @@ func (store *PostgresStore) ListUsers(ctx context.Context) ([]string, error) {
for rows.Next() {
var username string
if err := rows.Scan(&username); err != nil {
return nil, fmt.Errorf("failed to scan username: %v", err)
return nil, fmt.Errorf("failed to scan username: %w", err)
}
usernames = append(usernames, username)
}
@ -380,7 +380,7 @@ func (store *PostgresStore) GetUserByAccessKey(ctx context.Context, accessKey st
if err == sql.ErrNoRows {
return nil, credential.ErrAccessKeyNotFound
}
return nil, fmt.Errorf("failed to query access key: %v", err)
return nil, fmt.Errorf("failed to query access key: %w", err)
}
return store.GetUser(ctx, username)
@ -395,7 +395,7 @@ func (store *PostgresStore) CreateAccessKey(ctx context.Context, username string
var count int
err := store.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", username).Scan(&count)
if err != nil {
return fmt.Errorf("failed to check user existence: %v", err)
return fmt.Errorf("failed to check user existence: %w", err)
}
if count == 0 {
return credential.ErrUserNotFound
@ -406,7 +406,7 @@ func (store *PostgresStore) CreateAccessKey(ctx context.Context, username string
"INSERT INTO credentials (username, access_key, secret_key) VALUES ($1, $2, $3)",
username, cred.AccessKey, cred.SecretKey)
if err != nil {
return fmt.Errorf("failed to insert credential: %v", err)
return fmt.Errorf("failed to insert credential: %w", err)
}
return nil
@ -421,12 +421,12 @@ func (store *PostgresStore) DeleteAccessKey(ctx context.Context, username string
"DELETE FROM credentials WHERE username = $1 AND access_key = $2",
username, accessKey)
if err != nil {
return fmt.Errorf("failed to delete access key: %v", err)
return fmt.Errorf("failed to delete access key: %w", err)
}
rowsAffected, err := result.RowsAffected()
if err != nil {
return fmt.Errorf("failed to get rows affected: %v", err)
return fmt.Errorf("failed to get rows affected: %w", err)
}
if rowsAffected == 0 {
@ -434,7 +434,7 @@ func (store *PostgresStore) DeleteAccessKey(ctx context.Context, username string
var count int
err = store.db.QueryRowContext(ctx, "SELECT COUNT(*) FROM users WHERE username = $1", username).Scan(&count)
if err != nil {
return fmt.Errorf("failed to check user existence: %v", err)
return fmt.Errorf("failed to check user existence: %w", err)
}
if count == 0 {
return credential.ErrUserNotFound
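
The Postgres store above compares err == sql.ErrNoRows before adding context, which is fine because the comparison happens before any wrapping. Once a layer has wrapped an error with %w, though, only errors.Is and errors.As see through the added context; plain equality against the wrapped error no longer matches. A small illustration (not code from this commit):

package main

import (
    "database/sql"
    "errors"
    "fmt"
)

func main() {
    // Context added with %w keeps the sentinel reachable via errors.Is,
    // while direct equality against the wrapped error fails.
    err := fmt.Errorf("failed to query user: %w", sql.ErrNoRows)
    fmt.Println(err == sql.ErrNoRows)          // false
    fmt.Println(errors.Is(err, sql.ErrNoRows)) // true
}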

View File

@ -18,7 +18,7 @@ func (store *PostgresStore) GetPolicies(ctx context.Context) (map[string]policy_
rows, err := store.db.QueryContext(ctx, "SELECT name, document FROM policies")
if err != nil {
return nil, fmt.Errorf("failed to query policies: %v", err)
return nil, fmt.Errorf("failed to query policies: %w", err)
}
defer rows.Close()
@ -27,7 +27,7 @@ func (store *PostgresStore) GetPolicies(ctx context.Context) (map[string]policy_
var documentJSON []byte
if err := rows.Scan(&name, &documentJSON); err != nil {
return nil, fmt.Errorf("failed to scan policy row: %v", err)
return nil, fmt.Errorf("failed to scan policy row: %w", err)
}
var document policy_engine.PolicyDocument
@ -49,14 +49,14 @@ func (store *PostgresStore) CreatePolicy(ctx context.Context, name string, docum
documentJSON, err := json.Marshal(document)
if err != nil {
return fmt.Errorf("failed to marshal policy document: %v", err)
return fmt.Errorf("failed to marshal policy document: %w", err)
}
_, err = store.db.ExecContext(ctx,
"INSERT INTO policies (name, document) VALUES ($1, $2) ON CONFLICT (name) DO UPDATE SET document = $2, updated_at = CURRENT_TIMESTAMP",
name, documentJSON)
if err != nil {
return fmt.Errorf("failed to insert policy: %v", err)
return fmt.Errorf("failed to insert policy: %w", err)
}
return nil
@ -70,19 +70,19 @@ func (store *PostgresStore) UpdatePolicy(ctx context.Context, name string, docum
documentJSON, err := json.Marshal(document)
if err != nil {
return fmt.Errorf("failed to marshal policy document: %v", err)
return fmt.Errorf("failed to marshal policy document: %w", err)
}
result, err := store.db.ExecContext(ctx,
"UPDATE policies SET document = $2, updated_at = CURRENT_TIMESTAMP WHERE name = $1",
name, documentJSON)
if err != nil {
return fmt.Errorf("failed to update policy: %v", err)
return fmt.Errorf("failed to update policy: %w", err)
}
rowsAffected, err := result.RowsAffected()
if err != nil {
return fmt.Errorf("failed to get rows affected: %v", err)
return fmt.Errorf("failed to get rows affected: %w", err)
}
if rowsAffected == 0 {
@ -100,12 +100,12 @@ func (store *PostgresStore) DeletePolicy(ctx context.Context, name string) error
result, err := store.db.ExecContext(ctx, "DELETE FROM policies WHERE name = $1", name)
if err != nil {
return fmt.Errorf("failed to delete policy: %v", err)
return fmt.Errorf("failed to delete policy: %w", err)
}
rowsAffected, err := result.RowsAffected()
if err != nil {
return fmt.Errorf("failed to get rows affected: %v", err)
return fmt.Errorf("failed to get rows affected: %w", err)
}
if rowsAffected == 0 {

View File

@ -58,13 +58,13 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix
db, err := sql.Open("postgres", connStr)
if err != nil {
return fmt.Errorf("failed to open database: %v", err)
return fmt.Errorf("failed to open database: %w", err)
}
// Test connection
if err := db.Ping(); err != nil {
db.Close()
return fmt.Errorf("failed to ping database: %v", err)
return fmt.Errorf("failed to ping database: %w", err)
}
// Set connection pool settings
@ -77,7 +77,7 @@ func (store *PostgresStore) Initialize(configuration util.Configuration, prefix
// Create tables if they don't exist
if err := store.createTables(); err != nil {
db.Close()
return fmt.Errorf("failed to create tables: %v", err)
return fmt.Errorf("failed to create tables: %w", err)
}
store.configured = true
@ -124,15 +124,15 @@ func (store *PostgresStore) createTables() error {
// Execute table creation
if _, err := store.db.Exec(usersTable); err != nil {
return fmt.Errorf("failed to create users table: %v", err)
return fmt.Errorf("failed to create users table: %w", err)
}
if _, err := store.db.Exec(credentialsTable); err != nil {
return fmt.Errorf("failed to create credentials table: %v", err)
return fmt.Errorf("failed to create credentials table: %w", err)
}
if _, err := store.db.Exec(policiesTable); err != nil {
return fmt.Errorf("failed to create policies table: %v", err)
return fmt.Errorf("failed to create policies table: %w", err)
}
return nil

View File

@ -15,7 +15,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
db, _, _, err := store.getTxOrDB(ctx, "", false)
if err != nil {
return fmt.Errorf("findDB: %v", err)
return fmt.Errorf("findDB: %w", err)
}
dirStr, dirHash, name := GenDirAndName(key)
@ -50,7 +50,7 @@ func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []b
db, _, _, err := store.getTxOrDB(ctx, "", false)
if err != nil {
return nil, fmt.Errorf("findDB: %v", err)
return nil, fmt.Errorf("findDB: %w", err)
}
dirStr, dirHash, name := GenDirAndName(key)
@ -63,7 +63,7 @@ func (store *AbstractSqlStore) KvGet(ctx context.Context, key []byte) (value []b
}
if err != nil {
return nil, fmt.Errorf("kv get: %v", err)
return nil, fmt.Errorf("kv get: %w", err)
}
return
@ -73,7 +73,7 @@ func (store *AbstractSqlStore) KvDelete(ctx context.Context, key []byte) (err er
db, _, _, err := store.getTxOrDB(ctx, "", false)
if err != nil {
return fmt.Errorf("findDB: %v", err)
return fmt.Errorf("findDB: %w", err)
}
dirStr, dirHash, name := GenDirAndName(key)

View File

@ -18,7 +18,7 @@ func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte)
exists, err := store.kvCollection.DocumentExists(ctx, model.Key)
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
if exists {
_, err = store.kvCollection.UpdateDocument(ctx, model.Key, model)
@ -26,7 +26,7 @@ func (store *ArangodbStore) KvPut(ctx context.Context, key []byte, value []byte)
_, err = store.kvCollection.CreateDocument(ctx, model)
}
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil

View File

@ -44,7 +44,7 @@ func (store *CassandraStore) KvDelete(ctx context.Context, key []byte) (err erro
if err := store.session.Query(
"DELETE FROM filemeta WHERE directory=? AND name=?",
dir, name).Exec(); err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -45,7 +45,7 @@ func (store *Cassandra2Store) KvDelete(ctx context.Context, key []byte) (err err
if err := store.session.Query(
"DELETE FROM filemeta WHERE dirhash=? AND directory=? AND name=?",
util.HashStringToLong(dir), dir, name).Exec(); err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -78,7 +78,7 @@ func (store *ElasticStore) initialize(options []elastic.ClientOptionFunc) (err e
ctx := context.Background()
store.client, err = elastic.NewClient(options...)
if err != nil {
return fmt.Errorf("init elastic %v", err)
return fmt.Errorf("init elastic %w", err)
}
if ok, err := store.client.IndexExists(indexKV).Do(ctx); err == nil && !ok {
_, err = store.client.CreateIndex(indexKV).Body(kvMappings).Do(ctx)
@ -114,7 +114,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
value, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry marshal %v", err)
return fmt.Errorf("insert entry marshal %w", err)
}
_, err = store.client.Index().
Index(index).
@ -124,7 +124,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
Do(ctx)
if err != nil {
glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry %v", err)
return fmt.Errorf("insert entry %w", err)
}
return nil
}
@ -194,7 +194,7 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
}
}
glog.ErrorfCtx(ctx, "delete entry(index:%s,_id:%s) %v.", index, id, err)
return fmt.Errorf("delete entry %v", err)
return fmt.Errorf("delete entry %w", err)
}
func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {

View File

@ -26,7 +26,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
}
}
glog.ErrorfCtx(ctx, "delete key(id:%s) %v.", string(key), err)
return fmt.Errorf("delete key %v", err)
return fmt.Errorf("delete key %w", err)
}
func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
@ -53,7 +53,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
val, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.ErrorfCtx(ctx, "insert key(%s) %v.", string(key), err)
return fmt.Errorf("insert key %v", err)
return fmt.Errorf("insert key %w", err)
}
_, err = store.client.Index().
Index(indexKV).
@ -62,7 +62,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
BodyJson(string(val)).
Do(ctx)
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
}

View File

@ -48,7 +48,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix
timeoutStr := configuration.GetString(prefix + "timeout")
timeout, err := time.ParseDuration(timeoutStr)
if err != nil {
return fmt.Errorf("parse etcd store timeout: %v", err)
return fmt.Errorf("parse etcd store timeout: %w", err)
}
store.timeout = timeout
@ -66,7 +66,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix
var err error
tlsConfig, err = tlsInfo.ClientConfig()
if err != nil {
return fmt.Errorf("TLS client configuration error: %v", err)
return fmt.Errorf("TLS client configuration error: %w", err)
}
}

View File

@ -11,7 +11,7 @@ func (store *EtcdStore) KvPut(ctx context.Context, key []byte, value []byte) (er
_, err = store.client.Put(ctx, store.etcdKeyPrefix+string(key), string(value))
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -22,7 +22,7 @@ func (store *EtcdStore) KvGet(ctx context.Context, key []byte) (value []byte, er
resp, err := store.client.Get(ctx, store.etcdKeyPrefix+string(key))
if err != nil {
return nil, fmt.Errorf("kv get: %v", err)
return nil, fmt.Errorf("kv get: %w", err)
}
if len(resp.Kvs) == 0 {
@ -37,7 +37,7 @@ func (store *EtcdStore) KvDelete(ctx context.Context, key []byte) (err error) {
_, err = store.client.Delete(ctx, store.etcdKeyPrefix+string(key))
if err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -220,7 +220,7 @@ func mergeIntoManifest(saveFunc SaveDataAsChunkFunctionType, dataChunks []*filer
Chunks: dataChunks,
})
if serErr != nil {
return nil, fmt.Errorf("serializing manifest: %v", serErr)
return nil, fmt.Errorf("serializing manifest: %w", serErr)
}
minOffset, maxOffset := int64(math.MaxInt64), int64(math.MinInt64)

View File

@ -113,7 +113,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
glog.V(3).InfofCtx(ctx, "deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
return fmt.Errorf("filer store delete: %w", storeDeletionErr)
}
f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
@ -127,7 +127,7 @@ func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shou
glog.V(3).InfofCtx(ctx, "deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
return fmt.Errorf("filer store delete: %w", storeDeletionErr)
}
if !entry.IsDirectory() {
f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)

View File

@ -120,7 +120,7 @@ func (f *Filer) ReadPersistedLogBuffer(startPosition log_buffer.MessagePosition,
if visitErr == io.EOF {
return
}
err = fmt.Errorf("reading from persisted logs: %v", visitErr)
err = fmt.Errorf("reading from persisted logs: %w", visitErr)
return
}
var logEntry *filer_pb.LogEntry
@ -130,12 +130,12 @@ func (f *Filer) ReadPersistedLogBuffer(startPosition log_buffer.MessagePosition,
if visitErr == io.EOF {
break
}
err = fmt.Errorf("read next from persisted logs: %v", visitErr)
err = fmt.Errorf("read next from persisted logs: %w", visitErr)
return
}
isDone, visitErr = eachLogEntryFn(logEntry)
if visitErr != nil {
err = fmt.Errorf("process persisted log entry: %v", visitErr)
err = fmt.Errorf("process persisted log entry: %w", visitErr)
return
}
lastTsNs = logEntry.TsNs

View File

@ -60,7 +60,7 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
assignResult, err := operation.Assign(context.Background(), f.GetMaster, f.GrpcDialOption, assignRequest)
if err != nil {
return nil, nil, fmt.Errorf("AssignVolume: %v", err)
return nil, nil, fmt.Errorf("AssignVolume: %w", err)
}
if assignResult.Error != "" {
return nil, nil, fmt.Errorf("AssignVolume error: %v", assignResult.Error)

View File

@ -33,7 +33,7 @@ func (f *Filer) collectPersistedLogBuffer(startPosition log_buffer.MessagePositi
dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, math.MaxInt32, "", "", "")
if listDayErr != nil {
return nil, fmt.Errorf("fail to list log by day: %v", listDayErr)
return nil, fmt.Errorf("fail to list log by day: %w", listDayErr)
}
return NewOrderedLogVisitor(f, startPosition, stopTsNs, dayEntries)
@ -45,7 +45,7 @@ func (f *Filer) HasPersistedLogFiles(startPosition log_buffer.MessagePosition) (
dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 1, "", "", "")
if listDayErr != nil {
return false, fmt.Errorf("fail to list log by day: %v", listDayErr)
return false, fmt.Errorf("fail to list log by day: %w", listDayErr)
}
if len(dayEntries) == 0 {
return false, nil
@ -118,7 +118,7 @@ func (o *OrderedLogVisitor) GetNext() (logEntry *filer_pb.LogEntry, err error) {
if nextErr == io.EOF {
// do nothing since the filer has no more log entries
} else {
return nil, fmt.Errorf("failed to get next log entry: %v", nextErr)
return nil, fmt.Errorf("failed to get next log entry: %w", nextErr)
}
} else {
heap.Push(o.pq, &LogEntryItem{
@ -245,7 +245,7 @@ func (c *LogFileEntryCollector) collectMore(v *OrderedLogVisitor) (err error) {
if nextErr == io.EOF {
// do nothing since the filer has no more log entries
} else {
return fmt.Errorf("failed to get next log entry for %v: %v", entryName, err)
return fmt.Errorf("failed to get next log entry for %v: %w", entryName, err)
}
} else {
heap.Push(v.pq, &LogEntryItem{

View File

@ -48,7 +48,7 @@ func (store *HbaseStore) initialize(zkquorum, table string) (err error) {
headers := map[string][]string{store.cfMetaDir: nil}
get, err := hrpc.NewGet(context.Background(), store.table, []byte(key), hrpc.Families(headers))
if err != nil {
return fmt.Errorf("NewGet returned an error: %v", err)
return fmt.Errorf("NewGet returned an error: %w", err)
}
_, err = store.Client.Get(get)
if err != gohbase.TableNotFound {

View File

@ -12,7 +12,7 @@ func (store *LevelDBStore) KvPut(ctx context.Context, key []byte, value []byte)
err = store.db.Put(key, value, nil)
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -27,7 +27,7 @@ func (store *LevelDBStore) KvGet(ctx context.Context, key []byte) (value []byte,
}
if err != nil {
return nil, fmt.Errorf("kv get: %v", err)
return nil, fmt.Errorf("kv get: %w", err)
}
return
@ -38,7 +38,7 @@ func (store *LevelDBStore) KvDelete(ctx context.Context, key []byte) (err error)
err = store.db.Delete(key, nil)
if err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -13,7 +13,7 @@ func (store *LevelDB3Store) KvPut(ctx context.Context, key []byte, value []byte)
err = store.dbs[DEFAULT].Put(key, value, nil)
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -28,7 +28,7 @@ func (store *LevelDB3Store) KvGet(ctx context.Context, key []byte) (value []byte
}
if err != nil {
return nil, fmt.Errorf("kv get: %v", err)
return nil, fmt.Errorf("kv get: %w", err)
}
return
@ -39,7 +39,7 @@ func (store *LevelDB3Store) KvDelete(ctx context.Context, key []byte) (err error
err = store.dbs[DEFAULT].Delete(key, nil)
if err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -189,7 +189,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
})
if err != nil {
glog.V(0).Infof("SubscribeLocalMetadata %v: %v", peer, err)
return fmt.Errorf("subscribe: %v", err)
return fmt.Errorf("subscribe: %w", err)
}
for {
@ -204,7 +204,7 @@ func (ma *MetaAggregator) doSubscribeToOneFiler(f *Filer, self pb.ServerAddress,
if err := processEventFn(resp); err != nil {
glog.V(0).Infof("SubscribeLocalMetadata process %v: %v", resp, err)
return fmt.Errorf("process %v: %v", resp, err)
return fmt.Errorf("process %v: %w", resp, err)
}
f.onMetadataChangeEvent(resp)
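
For a message that wraps a single error value, %w renders exactly the same text as %v, so log lines and returned messages are unchanged by this sweep; only an unwrap chain is added. A small sketch of that property (the message and the cause below are made up for illustration):

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        cause := errors.New("connection reset")

        a := fmt.Errorf("process event: %v", cause)
        b := fmt.Errorf("process event: %w", cause)

        fmt.Println(a.Error() == b.Error())    // true: identical message text
        fmt.Println(errors.Unwrap(a) == nil)   // true: %v does not record a cause
        fmt.Println(errors.Unwrap(b) == cause) // true: %w does
    }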

View File

@ -24,7 +24,7 @@ func (store *MongodbStore) KvPut(ctx context.Context, key []byte, value []byte)
_, err = c.UpdateOne(ctx, filter, update, opts)
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -56,7 +56,7 @@ func (store *MongodbStore) KvDelete(ctx context.Context, key []byte) (err error)
where := bson.M{"directory": dir, "name": name}
_, err = store.connect.Database(store.database).Collection(store.collectionName).DeleteOne(ctx, where)
if err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -106,7 +106,7 @@ func (store *MysqlStore) initialize(dsn string, upsertQuery string, enableUpsert
}
cfg, err := mysql.ParseDSN(dsn)
if err != nil {
return fmt.Errorf("can not parse DSN error:%v", err)
return fmt.Errorf("can not parse DSN error:%w", err)
}
var dbErr error

View File

@ -13,7 +13,7 @@ func (store *UniversalRedisStore) KvPut(ctx context.Context, key []byte, value [
_, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -35,7 +35,7 @@ func (store *UniversalRedisStore) KvDelete(ctx context.Context, key []byte) (err
_, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -13,7 +13,7 @@ func (store *UniversalRedis2Store) KvPut(ctx context.Context, key []byte, value
_, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -35,7 +35,7 @@ func (store *UniversalRedis2Store) KvDelete(ctx context.Context, key []byte) (er
_, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -13,7 +13,7 @@ func (store *UniversalRedis3Store) KvPut(ctx context.Context, key []byte, value
_, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -35,7 +35,7 @@ func (store *UniversalRedis3Store) KvDelete(ctx context.Context, key []byte) (er
_, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -13,7 +13,7 @@ func (store *UniversalRedisLuaStore) KvPut(ctx context.Context, key []byte, valu
_, err = store.Client.Set(ctx, string(key), value, 0).Result()
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -35,7 +35,7 @@ func (store *UniversalRedisLuaStore) KvDelete(ctx context.Context, key []byte) (
_, err = store.Client.Del(ctx, string(key)).Result()
if err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -16,13 +16,13 @@ func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress pb.ServerAdd
return readErr
}); readErr != nil {
if readErr != filer_pb.ErrNotFound {
return nil, fmt.Errorf("read existing mapping: %v", readErr)
return nil, fmt.Errorf("read existing mapping: %w", readErr)
}
oldContent = nil
}
mappings, readErr = UnmarshalRemoteStorageMappings(oldContent)
if readErr != nil {
return nil, fmt.Errorf("unmarshal mappings: %v", readErr)
return nil, fmt.Errorf("unmarshal mappings: %w", readErr)
}
return
@ -38,7 +38,7 @@ func InsertMountMapping(filerClient filer_pb.FilerClient, dir string, remoteStor
})
if err != nil {
if err != filer_pb.ErrNotFound {
return fmt.Errorf("read existing mapping: %v", err)
return fmt.Errorf("read existing mapping: %w", err)
}
}
@ -53,7 +53,7 @@ func InsertMountMapping(filerClient filer_pb.FilerClient, dir string, remoteStor
return SaveInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, newContent)
})
if err != nil {
return fmt.Errorf("save mapping: %v", err)
return fmt.Errorf("save mapping: %w", err)
}
return nil
@ -69,7 +69,7 @@ func DeleteMountMapping(filerClient filer_pb.FilerClient, dir string) (err error
})
if err != nil {
if err != filer_pb.ErrNotFound {
return fmt.Errorf("read existing mapping: %v", err)
return fmt.Errorf("read existing mapping: %w", err)
}
}
@ -84,7 +84,7 @@ func DeleteMountMapping(filerClient filer_pb.FilerClient, dir string) (err error
return SaveInsideFiler(client, DirectoryEtcRemote, REMOTE_STORAGE_MOUNT_FILE, newContent)
})
if err != nil {
return fmt.Errorf("save mapping: %v", err)
return fmt.Errorf("save mapping: %w", err)
}
return nil
@ -100,7 +100,7 @@ func addRemoteStorageMapping(oldContent []byte, dir string, storageLocation *rem
mappings.Mappings[dir] = storageLocation
if newContent, err = proto.Marshal(mappings); err != nil {
return oldContent, fmt.Errorf("marshal mappings: %v", err)
return oldContent, fmt.Errorf("marshal mappings: %w", err)
}
return
@ -116,7 +116,7 @@ func removeRemoteStorageMapping(oldContent []byte, dir string) (newContent []byt
delete(mappings.Mappings, dir)
if newContent, err = proto.Marshal(mappings); err != nil {
return oldContent, fmt.Errorf("marshal mappings: %v", err)
return oldContent, fmt.Errorf("marshal mappings: %w", err)
}
return
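
The hunks above still compare errors against filer_pb.ErrNotFound with !=, which only matches the bare sentinel. Once errors are wrapped with %w anywhere along the chain, errors.Is is the robust check. A hedged sketch (errNotFound below stands in for a sentinel such as filer_pb.ErrNotFound; it is not the real variable):

    package main

    import (
        "errors"
        "fmt"
    )

    // errNotFound stands in for a sentinel such as filer_pb.ErrNotFound.
    var errNotFound = errors.New("not found")

    func main() {
        wrapped := fmt.Errorf("read existing mapping: %w", errNotFound)

        fmt.Println(wrapped == errNotFound)          // false: direct comparison misses wrapped sentinels
        fmt.Println(errors.Is(wrapped, errNotFound)) // true: errors.Is walks the %w chain
    }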

View File

@ -230,7 +230,7 @@ func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey
}
if err := iter.Err(); err != nil {
return fmt.Errorf("prefix scan iterator: %v", err)
return fmt.Errorf("prefix scan iterator: %w", err)
}
return nil
}

View File

@ -15,7 +15,7 @@ func (store *RocksDBStore) KvPut(ctx context.Context, key []byte, value []byte)
err = store.db.Put(store.wo, key, value)
if err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -30,7 +30,7 @@ func (store *RocksDBStore) KvGet(ctx context.Context, key []byte) (value []byte,
}
if err != nil {
return nil, fmt.Errorf("kv get: %v", err)
return nil, fmt.Errorf("kv get: %w", err)
}
return
@ -41,7 +41,7 @@ func (store *RocksDBStore) KvDelete(ctx context.Context, key []byte) (err error)
err = store.db.Delete(store.wo, key)
if err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -29,12 +29,12 @@ func ProtoToText(writer io.Writer, config proto.Message) error {
text, marshalErr := m.Marshal(config)
if marshalErr != nil {
return fmt.Errorf("marshal proto message: %v", marshalErr)
return fmt.Errorf("marshal proto message: %w", marshalErr)
}
_, writeErr := writer.Write(text)
if writeErr != nil {
return fmt.Errorf("fail to write proto message: %v", writeErr)
return fmt.Errorf("fail to write proto message: %w", writeErr)
}
return writeErr

View File

@ -134,7 +134,7 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
stats.FilerRequestHistogram.WithLabelValues("chunkDownload").Observe(time.Since(start).Seconds())
if err != nil {
stats.FilerHandlerCounter.WithLabelValues("chunkDownloadError").Inc()
return fmt.Errorf("read chunk: %v", err)
return fmt.Errorf("read chunk: %w", err)
}
stats.FilerHandlerCounter.WithLabelValues("chunkDownload").Inc()
downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize))

View File

@ -51,7 +51,7 @@ func (store *TarantoolStore) Initialize(configuration weed_util.Configuration, p
timeoutStr := configuration.GetString(prefix + "timeout")
timeout, err := time.ParseDuration(timeoutStr)
if err != nil {
return fmt.Errorf("parse tarantool store timeout: %v", err)
return fmt.Errorf("parse tarantool store timeout: %w", err)
}
maxReconnects := configuration.GetInt(prefix + "maxReconnects")
@ -80,7 +80,7 @@ func (store *TarantoolStore) initialize(addresses []string, user string, passwor
ctx := context.Background()
p, err := pool.ConnectWithOpts(ctx, poolInstances, poolOpts)
if err != nil {
return fmt.Errorf("Can't create connection pool: %v", err)
return fmt.Errorf("Can't create connection pool: %w", err)
}
_, err = p.Do(tarantool.NewPingRequest(), pool.ANY).Get()

View File

@ -33,7 +33,7 @@ func (store *TarantoolStore) KvPut(ctx context.Context, key []byte, value []byte
ret := crud.Result{}
if err := store.pool.Do(req, pool.RW).GetTyped(&ret); err != nil {
return fmt.Errorf("kv put: %v", err)
return fmt.Errorf("kv put: %w", err)
}
return nil
@ -88,7 +88,7 @@ func (store *TarantoolStore) KvDelete(ctx context.Context, key []byte) (err erro
Opts(delOpts)
if _, err := store.pool.Do(req, pool.RW).Get(); err != nil {
return fmt.Errorf("kv delete: %v", err)
return fmt.Errorf("kv delete: %w", err)
}
return nil

View File

@ -140,13 +140,13 @@ func (store *YdbStore) doTxOrDB(ctx context.Context, q *string, params *table.Qu
if tx, ok := ctx.Value("tx").(query.Transaction); ok {
res, err = tx.Query(ctx, *q, query.WithParameters(params))
if err != nil {
return fmt.Errorf("execute transaction: %v", err)
return fmt.Errorf("execute transaction: %w", err)
}
} else {
err = store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) {
res, err = s.Query(ctx, *q, query.WithParameters(params), ts)
if err != nil {
return fmt.Errorf("execute statement: %v", err)
return fmt.Errorf("execute statement: %w", err)
}
return nil
}, query.WithIdempotent())
@ -158,7 +158,7 @@ func (store *YdbStore) doTxOrDB(ctx context.Context, q *string, params *table.Qu
defer func() { _ = res.Close(ctx) }()
if processResultFunc != nil {
if err = processResultFunc(res); err != nil {
return fmt.Errorf("process result: %v", err)
return fmt.Errorf("process result: %w", err)
}
}
}

View File

@ -153,5 +153,5 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) {
}
lastErr = err
}
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
return nil, "", fmt.Errorf("log: cannot create log: %w", lastErr)
}

View File

@ -104,7 +104,7 @@ func (iama *IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfigur
func (iama *IamS3ApiConfigure) GetS3ApiConfigurationFromCredentialManager(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
config, err := iama.credentialManager.LoadConfiguration(context.Background())
if err != nil {
return fmt.Errorf("failed to load configuration from credential manager: %v", err)
return fmt.Errorf("failed to load configuration from credential manager: %w", err)
}
*s3cfg = *config
return nil

View File

@ -49,7 +49,7 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
if err != nil {
glog.V(0).Infof("upload data %v: %v", filename, err)
return nil, fmt.Errorf("upload data: %v", err)
return nil, fmt.Errorf("upload data: %w", err)
}
if uploadResult.Error != "" {
glog.V(0).Infof("upload failure %v: %v", filename, err)

View File

@ -25,7 +25,7 @@ func (b *MessageQueueBroker) BrokerConnectToBalancer(brokerBalancer string, stop
return pb.WithBrokerGrpcClient(true, brokerBalancer, b.grpcDialOption, func(client mq_pb.SeaweedMessagingClient) error {
stream, err := client.PublisherToPubBalancer(context.Background())
if err != nil {
return fmt.Errorf("connect to balancer %v: %v", brokerBalancer, err)
return fmt.Errorf("connect to balancer %v: %w", brokerBalancer, err)
}
defer stream.CloseSend()
err = stream.Send(&mq_pb.PublisherToPubBalancerRequest{
@ -36,7 +36,7 @@ func (b *MessageQueueBroker) BrokerConnectToBalancer(brokerBalancer string, stop
},
})
if err != nil {
return fmt.Errorf("send init message: %v", err)
return fmt.Errorf("send init message: %w", err)
}
for {

View File

@ -66,7 +66,7 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.
// save the topic configuration on filer
if err := b.fca.SaveTopicConfToFiler(t, resp); err != nil {
return nil, fmt.Errorf("configure topic: %v", err)
return nil, fmt.Errorf("configure topic: %w", err)
}
b.PubBalancer.OnPartitionChange(request.Topic, resp.BrokerPartitionAssignments)

View File

@ -164,14 +164,14 @@ func (b *MessageQueueBroker) GetTopicConfiguration(ctx context.Context, request
if conf, createdAtNs, modifiedAtNs, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil {
glog.V(0).Infof("get topic configuration %s: %v", request.Topic, err)
return nil, fmt.Errorf("failed to read topic configuration: %v", err)
return nil, fmt.Errorf("failed to read topic configuration: %w", err)
}
// Ensure topic assignments are active
err = b.ensureTopicActiveAssignments(t, conf)
if err != nil {
glog.V(0).Infof("ensure topic active assignments %s: %v", request.Topic, err)
return nil, fmt.Errorf("failed to ensure topic assignments: %v", err)
return nil, fmt.Errorf("failed to ensure topic assignments: %w", err)
}
// Build the response with complete configuration including metadata
@ -208,7 +208,7 @@ func (b *MessageQueueBroker) GetTopicPublishers(ctx context.Context, request *mq
var conf *mq_pb.ConfigureTopicResponse
if conf, _, _, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil {
glog.V(0).Infof("get topic configuration for publishers %s: %v", request.Topic, err)
return nil, fmt.Errorf("failed to read topic configuration: %v", err)
return nil, fmt.Errorf("failed to read topic configuration: %w", err)
}
// Collect publishers from each partition that is hosted on this broker
@ -262,7 +262,7 @@ func (b *MessageQueueBroker) GetTopicSubscribers(ctx context.Context, request *m
var conf *mq_pb.ConfigureTopicResponse
if conf, _, _, err = b.fca.ReadTopicConfFromFilerWithMetadata(t); err != nil {
glog.V(0).Infof("get topic configuration for subscribers %s: %v", request.Topic, err)
return nil, fmt.Errorf("failed to read topic configuration: %v", err)
return nil, fmt.Errorf("failed to read topic configuration: %w", err)
}
// Collect subscribers from each partition that is hosted on this broker

View File

@ -145,7 +145,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
// send to the local partition
if err = localTopicPartition.Publish(dataMessage); err != nil {
return fmt.Errorf("topic %v partition %v publish error: %v", initMessage.Topic, initMessage.Partition, err)
return fmt.Errorf("topic %v partition %v publish error: %w", initMessage.Topic, initMessage.Partition, err)
}
// Update published offset and last seen time for this publisher

View File

@ -15,7 +15,7 @@ func (b *MessageQueueBroker) PublisherToPubBalancer(stream mq_pb.SeaweedMessagin
}
req, err := stream.Recv()
if err != nil {
return fmt.Errorf("receive init message: %v", err)
return fmt.Errorf("receive init message: %w", err)
}
// process init message

View File

@ -14,12 +14,12 @@ func (b *MessageQueueBroker) GetOrGenerateLocalPartition(t topic.Topic, partitio
conf, readConfErr := b.fca.ReadTopicConfFromFiler(t)
if readConfErr != nil {
glog.Errorf("topic %v not found: %v", t, readConfErr)
return nil, fmt.Errorf("topic %v not found: %v", t, readConfErr)
return nil, fmt.Errorf("topic %v not found: %w", t, readConfErr)
}
localTopicPartition, _, getOrGenError = b.doGetOrGenLocalPartition(t, partition, conf)
if getOrGenError != nil {
glog.Errorf("topic %v partition %v not setup: %v", t, partition, getOrGenError)
return nil, fmt.Errorf("topic %v partition %v not setup: %v", t, partition, getOrGenError)
return nil, fmt.Errorf("topic %v partition %v not setup: %w", t, partition, getOrGenError)
}
return localTopicPartition, nil
}

View File

@ -44,13 +44,13 @@ func NewPublishSession(agentAddress string, topicSchema *schema.Schema, partitio
stream, err := agentClient.PublishRecord(context.Background())
if err != nil {
return nil, fmt.Errorf("publish record: %v", err)
return nil, fmt.Errorf("publish record: %w", err)
}
if err = stream.Send(&mq_agent_pb.PublishRecordRequest{
SessionId: resp.SessionId,
}); err != nil {
return nil, fmt.Errorf("send session id: %v", err)
return nil, fmt.Errorf("send session id: %w", err)
}
return &PublishSession{
@ -67,7 +67,7 @@ func (a *PublishSession) CloseSession() error {
}
err := a.stream.CloseSend()
if err != nil {
return fmt.Errorf("close send: %v", err)
return fmt.Errorf("close send: %w", err)
}
a.schema = nil
return err

View File

@ -50,13 +50,13 @@ func NewSubscribeSession(agentAddress string, option *SubscribeOption) (*Subscri
stream, err := agentClient.SubscribeRecord(context.Background())
if err != nil {
return nil, fmt.Errorf("subscribe record: %v", err)
return nil, fmt.Errorf("subscribe record: %w", err)
}
if err = stream.Send(&mq_agent_pb.SubscribeRecordRequest{
Init: initRequest,
}); err != nil {
return nil, fmt.Errorf("send session id: %v", err)
return nil, fmt.Errorf("send session id: %w", err)
}
return &SubscribeSession{

View File

@ -38,7 +38,7 @@ func (p *TopicPublisher) PublishRecord(key []byte, recordValue *schema_pb.Record
// serialize record value
value, err := proto.Marshal(recordValue)
if err != nil {
return fmt.Errorf("failed to marshal record value: %v", err)
return fmt.Errorf("failed to marshal record value: %w", err)
}
return p.doPublish(key, value)

View File

@ -137,7 +137,7 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro
brokerClient := mq_pb.NewSeaweedMessagingClient(grpcConnection)
stream, err := brokerClient.PublishMessage(context.Background())
if err != nil {
return fmt.Errorf("create publish client: %v", err)
return fmt.Errorf("create publish client: %w", err)
}
publishClient := &PublishClient{
SeaweedMessaging_PublishMessageClient: stream,
@ -154,12 +154,12 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro
},
},
}); err != nil {
return fmt.Errorf("send init message: %v", err)
return fmt.Errorf("send init message: %w", err)
}
// process the hello message
resp, err := stream.Recv()
if err != nil {
return fmt.Errorf("recv init response: %v", err)
return fmt.Errorf("recv init response: %w", err)
}
if resp.Error != "" {
return fmt.Errorf("init response error: %v", resp.Error)
@ -208,7 +208,7 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro
Data: data,
},
}); err != nil {
return fmt.Errorf("send publish data: %v", err)
return fmt.Errorf("send publish data: %w", err)
}
publishCounter++
atomic.StoreInt64(&publishedTsNs, data.TsNs)
@ -218,7 +218,7 @@ func (p *TopicPublisher) doPublishToPartition(job *EachPartitionPublishJob) erro
} else {
// CloseSend would cancel the context on the server side
if err := publishClient.CloseSend(); err != nil {
return fmt.Errorf("close send: %v", err)
return fmt.Errorf("close send: %w", err)
}
}

View File

@ -22,7 +22,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
subscribeClient, err := client.SubscribeMessage(context.Background())
if err != nil {
return fmt.Errorf("create subscribe client: %v", err)
return fmt.Errorf("create subscribe client: %w", err)
}
slidingWindowSize := sub.SubscriberConfig.SlidingWindowSize
@ -94,7 +94,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
if errors.Is(err, io.EOF) {
return nil
}
return fmt.Errorf("subscribe recv: %v", err)
return fmt.Errorf("subscribe recv: %w", err)
}
if resp.Message == nil {
glog.V(0).Infof("subscriber %s/%s received nil message", sub.ContentConfig.Topic, sub.SubscriberConfig.ConsumerGroup)

View File

@ -31,7 +31,7 @@ func CompactTopicPartitions(filerClient filer_pb.FilerClient, t topic.Topic, tim
// list the topic partition versions
topicVersions, err := collectTopicVersions(filerClient, t, timeAgo)
if err != nil {
return fmt.Errorf("list topic files: %v", err)
return fmt.Errorf("list topic files: %w", err)
}
// compact the partitions
@ -120,7 +120,7 @@ func compactTopicPartitionDir(filerClient filer_pb.FilerClient, topicName, parti
// create a parquet schema
parquetSchema, err := schema.ToParquetSchema(topicName, recordType)
if err != nil {
return fmt.Errorf("ToParquetSchema failed: %v", err)
return fmt.Errorf("ToParquetSchema failed: %w", err)
}
// TODO parallelize the writing
@ -210,7 +210,7 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
tempFile, err := os.CreateTemp(".", "t*.parquet")
if err != nil {
return fmt.Errorf("create temp file: %v", err)
return fmt.Errorf("create temp file: %w", err)
}
defer func() {
tempFile.Close()
@ -241,7 +241,7 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
record := &schema_pb.RecordValue{}
if err := proto.Unmarshal(entry.Data, record); err != nil {
return fmt.Errorf("unmarshal record value: %v", err)
return fmt.Errorf("unmarshal record value: %w", err)
}
record.Fields[SW_COLUMN_NAME_TS] = &schema_pb.Value{
@ -256,7 +256,7 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
}
if err := schema.AddRecordValue(rowBuilder, recordType, parquetLevels, record); err != nil {
return fmt.Errorf("add record value: %v", err)
return fmt.Errorf("add record value: %w", err)
}
rows = append(rows, rowBuilder.Row())
@ -264,18 +264,18 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
return nil
}); err != nil {
return fmt.Errorf("iterate log entry %v/%v: %v", partitionDir, logFile.Name, err)
return fmt.Errorf("iterate log entry %v/%v: %w", partitionDir, logFile.Name, err)
}
fmt.Printf("processed %d rows\n", len(rows))
if _, err := writer.WriteRows(rows); err != nil {
return fmt.Errorf("write rows: %v", err)
return fmt.Errorf("write rows: %w", err)
}
}
if err := writer.Close(); err != nil {
return fmt.Errorf("close writer: %v", err)
return fmt.Errorf("close writer: %w", err)
}
// write to parquet file to partitionDir
@ -291,13 +291,13 @@ func writeLogFilesToParquet(filerClient filer_pb.FilerClient, partitionDir strin
func saveParquetFileToPartitionDir(filerClient filer_pb.FilerClient, sourceFile *os.File, partitionDir, parquetFileName string, preference *operation.StoragePreference, startTsNs, stopTsNs int64) error {
uploader, err := operation.NewUploader()
if err != nil {
return fmt.Errorf("new uploader: %v", err)
return fmt.Errorf("new uploader: %w", err)
}
// get file size
fileInfo, err := sourceFile.Stat()
if err != nil {
return fmt.Errorf("stat source file: %v", err)
return fmt.Errorf("stat source file: %w", err)
}
// upload file in chunks
@ -360,7 +360,7 @@ func saveParquetFileToPartitionDir(filerClient filer_pb.FilerClient, sourceFile
Entry: entry,
})
}); err != nil {
return fmt.Errorf("create entry: %v", err)
return fmt.Errorf("create entry: %w", err)
}
fmt.Printf("saved to %s/%s\n", partitionDir, parquetFileName)
@ -436,12 +436,12 @@ func eachChunk(buf []byte, eachLogEntryFn log_buffer.EachLogEntryFuncType) (proc
logEntry := &filer_pb.LogEntry{}
if err = proto.Unmarshal(entryData, logEntry); err != nil {
pos += 4 + int(size)
err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %v", err)
err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %w", err)
return
}
if _, err = eachLogEntryFn(logEntry); err != nil {
err = fmt.Errorf("process log entry %v: %v", logEntry, err)
err = fmt.Errorf("process log entry %v: %w", logEntry, err)
return
}
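
Several call sites above wrap I/O errors coming from the os package (for example the temp-file creation in writeLogFilesToParquet). With %w, typed errors remain extractable through errors.As. A minimal sketch using only the standard library (the *fs.PathError below is built by hand as a stand-in for what an os call might return):

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
    )

    func main() {
        // Hand-built stand-in for an error an os call could return.
        ioErr := &fs.PathError{Op: "open", Path: "t123.parquet", Err: fs.ErrPermission}
        wrapped := fmt.Errorf("create temp file: %w", ioErr)

        var pathErr *fs.PathError
        if errors.As(wrapped, &pathErr) {
            // errors.As digs through the %w chain, so the typed error is still reachable.
            fmt.Println("failed path:", pathErr.Path, "cause:", pathErr.Err)
        }
    }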

View File

@ -34,7 +34,7 @@ func GenLogOnDiskReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p top
logEntry := &filer_pb.LogEntry{}
if err = proto.Unmarshal(entryData, logEntry); err != nil {
pos += 4 + int(size)
err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %v", err)
err = fmt.Errorf("unexpected unmarshal mq_pb.Message: %w", err)
return
}
if logEntry.TsNs <= starTsNs {
@ -48,7 +48,7 @@ func GenLogOnDiskReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p top
// fmt.Printf(" read logEntry: %v, ts %v\n", string(logEntry.Key), time.Unix(0, logEntry.TsNs).UTC())
if _, err = eachLogEntryFn(logEntry); err != nil {
err = fmt.Errorf("process log entry %v: %v", logEntry, err)
err = fmt.Errorf("process log entry %v: %w", logEntry, err)
return
}

View File

@ -69,7 +69,7 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
// convert parquet row to schema_pb.RecordValue
recordValue, err := schema.ToRecordValue(recordType, parquetLevels, row)
if err != nil {
return processedTsNs, fmt.Errorf("ToRecordValue failed: %v", err)
return processedTsNs, fmt.Errorf("ToRecordValue failed: %w", err)
}
processedTsNs = recordValue.Fields[SW_COLUMN_NAME_TS].GetInt64Value()
if processedTsNs <= starTsNs {
@ -81,7 +81,7 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
data, marshalErr := proto.Marshal(recordValue)
if marshalErr != nil {
return processedTsNs, fmt.Errorf("marshal record value: %v", marshalErr)
return processedTsNs, fmt.Errorf("marshal record value: %w", marshalErr)
}
logEntry := &filer_pb.LogEntry{
@ -93,7 +93,7 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
// fmt.Printf(" parquet entry %s ts %v\n", string(logEntry.Key), time.Unix(0, logEntry.TsNs).UTC())
if _, err = eachLogEntryFn(logEntry); err != nil {
return processedTsNs, fmt.Errorf("process log entry %v: %v", logEntry, err)
return processedTsNs, fmt.Errorf("process log entry %v: %w", logEntry, err)
}
}

View File

@ -9,7 +9,7 @@ import (
func ToParquetSchema(topicName string, recordType *schema_pb.RecordType) (*parquet.Schema, error) {
rootNode, err := toParquetFieldTypeRecord(recordType)
if err != nil {
return nil, fmt.Errorf("failed to convert record type to parquet schema: %v", err)
return nil, fmt.Errorf("failed to convert record type to parquet schema: %w", err)
}
// Fields are sorted by name, so the value should be sorted also

View File

@ -155,7 +155,7 @@ func (p *LocalPartition) MaybeConnectToFollowers(initMessage *mq_pb.PublishMessa
followerClient := mq_pb.NewSeaweedMessagingClient(p.followerGrpcConnection)
p.publishFolloweMeStream, err = followerClient.PublishFollowMe(ctx)
if err != nil {
return fmt.Errorf("fail to create publish client: %v", err)
return fmt.Errorf("fail to create publish client: %w", err)
}
if err = p.publishFolloweMeStream.Send(&mq_pb.PublishFollowMeRequest{
Message: &mq_pb.PublishFollowMeRequest_Init{

View File

@ -52,12 +52,12 @@ func (t Topic) ReadConfFile(client filer_pb.SeaweedFilerClient) (*mq_pb.Configur
return nil, err
}
if err != nil {
return nil, fmt.Errorf("read topic.conf of %v: %v", t, err)
return nil, fmt.Errorf("read topic.conf of %v: %w", t, err)
}
// parse into filer conf object
conf := &mq_pb.ConfigureTopicResponse{}
if err = jsonpb.Unmarshal(data, conf); err != nil {
return nil, fmt.Errorf("unmarshal topic %v conf: %v", t, err)
return nil, fmt.Errorf("unmarshal topic %v conf: %w", t, err)
}
return conf, nil
}
@ -75,7 +75,7 @@ func (t Topic) ReadConfFileWithMetadata(client filer_pb.SeaweedFilerClient) (*mq
if errors.Is(err, filer_pb.ErrNotFound) {
return nil, 0, 0, err
}
return nil, 0, 0, fmt.Errorf("lookup topic.conf of %v: %v", t, err)
return nil, 0, 0, fmt.Errorf("lookup topic.conf of %v: %w", t, err)
}
// Get file metadata
@ -88,7 +88,7 @@ func (t Topic) ReadConfFileWithMetadata(client filer_pb.SeaweedFilerClient) (*mq
// Parse the configuration
conf := &mq_pb.ConfigureTopicResponse{}
if err = jsonpb.Unmarshal(resp.Entry.Content, conf); err != nil {
return nil, 0, 0, fmt.Errorf("unmarshal topic %v conf: %v", t, err)
return nil, 0, 0, fmt.Errorf("unmarshal topic %v conf: %w", t, err)
}
return conf, createdAtNs, modifiedAtNs, nil
@ -98,7 +98,7 @@ func (t Topic) WriteConfFile(client filer_pb.SeaweedFilerClient, conf *mq_pb.Con
var buf bytes.Buffer
filer.ProtoToText(&buf, conf)
if err := filer.SaveInsideFiler(client, t.Dir(), filer.TopicConfFile, buf.Bytes()); err != nil {
return fmt.Errorf("save topic %v conf: %v", t, err)
return fmt.Errorf("save topic %v conf: %w", t, err)
}
return nil
}

View File

@ -49,7 +49,7 @@ func (k *AwsSqsPub) initialize(awsAccessKeyId, awsSecretAccessKey, region, queue
sess, err := session.NewSession(config)
if err != nil {
return fmt.Errorf("create aws session: %v", err)
return fmt.Errorf("create aws session: %w", err)
}
k.svc = sqs.New(sess)

View File

@ -32,7 +32,7 @@ func (h *httpClient) sendMessage(message *webhookMessage) error {
// Serialize the protobuf message to JSON for HTTP payload
notificationData, err := json.Marshal(message.Notification)
if err != nil {
return fmt.Errorf("failed to marshal notification: %v", err)
return fmt.Errorf("failed to marshal notification: %w", err)
}
payload := map[string]interface{}{
@ -43,12 +43,12 @@ func (h *httpClient) sendMessage(message *webhookMessage) error {
jsonData, err := json.Marshal(payload)
if err != nil {
return fmt.Errorf("failed to marshal message: %v", err)
return fmt.Errorf("failed to marshal message: %w", err)
}
req, err := http.NewRequest(http.MethodPost, h.endpoint, bytes.NewBuffer(jsonData))
if err != nil {
return fmt.Errorf("failed to create request: %v", err)
return fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
@ -68,7 +68,7 @@ func (h *httpClient) sendMessage(message *webhookMessage) error {
glog.Errorf("failed to drain response: %v", err)
}
return fmt.Errorf("failed to send request: %v", err)
return fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()

View File

@ -120,7 +120,7 @@ func (w *Queue) setupWatermillQueue(cfg *config) error {
logger,
)
if err != nil {
return fmt.Errorf("failed to create router: %v", err)
return fmt.Errorf("failed to create router: %w", err)
}
w.router = router
@ -135,7 +135,7 @@ func (w *Queue) setupWatermillQueue(cfg *config) error {
poisonQueue, err := middleware.PoisonQueue(w.queueChannel, deadLetterTopic)
if err != nil {
return fmt.Errorf("failed to create poison queue: %v", err)
return fmt.Errorf("failed to create poison queue: %w", err)
}
router.AddPlugin(plugin.SignalsHandler)

View File

@ -80,7 +80,7 @@ func (ap *singleThreadAssignProxy) doAssign(grpcConnection *grpc.ClientConn, pri
ap.assignClient, err = client.StreamAssign(context.Background())
if err != nil {
ap.assignClient = nil
return nil, fmt.Errorf("fail to create stream assign client: %v", err)
return nil, fmt.Errorf("fail to create stream assign client: %w", err)
}
}
@ -105,7 +105,7 @@ func (ap *singleThreadAssignProxy) doAssign(grpcConnection *grpc.ClientConn, pri
WritableVolumeCount: request.WritableVolumeCount,
}
if err = ap.assignClient.Send(req); err != nil {
return nil, fmt.Errorf("StreamAssignSend: %v", err)
return nil, fmt.Errorf("StreamAssignSend: %w", err)
}
resp, grpcErr := ap.assignClient.Recv()
if grpcErr != nil {

View File

@ -83,7 +83,7 @@ func (cm *ChunkManifest) DeleteChunks(masterFn GetMasterFn, usePublicUrl bool, g
results, err := DeleteFileIds(masterFn, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err)
return fmt.Errorf("chunk delete: %w", err)
}
for _, result := range results {
if result.Error != "" {

View File

@ -129,7 +129,7 @@ func (uploader *Uploader) UploadWithRetry(filerClient filer_pb.FilerClient, assi
return nil
}); grpcAssignErr != nil {
return fmt.Errorf("filerGrpcAddress assign volume: %v", grpcAssignErr)
return fmt.Errorf("filerGrpcAddress assign volume: %w", grpcAssignErr)
}
uploadOption.UploadUrl = genFileUrlFn(host, fileId)
@ -171,7 +171,7 @@ func (uploader *Uploader) doUpload(ctx context.Context, reader io.Reader, option
} else {
data, err = io.ReadAll(reader)
if err != nil {
err = fmt.Errorf("read input: %v", err)
err = fmt.Errorf("read input: %w", err)
return
}
}
@ -245,7 +245,7 @@ func (uploader *Uploader) doUploadData(ctx context.Context, data []byte, option
cipherKey := util.GenCipherKey()
encryptedData, encryptionErr := util.Encrypt(data, cipherKey)
if encryptionErr != nil {
err = fmt.Errorf("encrypt input: %v", encryptionErr)
err = fmt.Errorf("encrypt input: %w", encryptionErr)
return
}
@ -389,13 +389,13 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
resp_body, ra_err := io.ReadAll(resp.Body)
if ra_err != nil {
return nil, fmt.Errorf("read response body %v: %v", option.UploadUrl, ra_err)
return nil, fmt.Errorf("read response body %v: %w", option.UploadUrl, ra_err)
}
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
glog.ErrorfCtx(ctx, "unmarshal %s: %v", option.UploadUrl, string(resp_body))
return nil, fmt.Errorf("unmarshal %v: %v", option.UploadUrl, unmarshal_err)
return nil, fmt.Errorf("unmarshal %v: %w", option.UploadUrl, unmarshal_err)
}
if ret.Error != "" {
return nil, fmt.Errorf("unmarshalled error %v: %v", option.UploadUrl, ret.Error)

View File

@ -77,7 +77,7 @@ func StreamBfs(client SeaweedFilerClient, dir util.FullPath, olderThanTsNs int64
Directory: string(dir),
})
if err != nil {
return fmt.Errorf("traverse bfs metadata: %v", err)
return fmt.Errorf("traverse bfs metadata: %w", err)
}
for {
resp, err := stream.Recv()
@ -85,7 +85,7 @@ func StreamBfs(client SeaweedFilerClient, dir util.FullPath, olderThanTsNs int64
if err == io.EOF {
break
}
return fmt.Errorf("traverse bfs metadata: %v", err)
return fmt.Errorf("traverse bfs metadata: %w", err)
}
if err := fn(util.FullPath(resp.Directory), resp.Entry); err != nil {
return err

View File

@ -112,7 +112,7 @@ func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *Create
resp, err := client.CreateEntry(ctx, request)
if err != nil {
glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
return fmt.Errorf("CreateEntry: %v", err)
return fmt.Errorf("CreateEntry: %w", err)
}
if resp.Error != "" {
glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
@ -125,7 +125,7 @@ func UpdateEntry(ctx context.Context, client SeaweedFilerClient, request *Update
_, err := client.UpdateEntry(ctx, request)
if err != nil {
glog.V(1).InfofCtx(ctx, "update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
return fmt.Errorf("UpdateEntry: %v", err)
return fmt.Errorf("UpdateEntry: %w", err)
}
return nil
}
@ -137,7 +137,7 @@ func LookupEntry(ctx context.Context, client SeaweedFilerClient, request *Lookup
return nil, ErrNotFound
}
glog.V(3).InfofCtx(ctx, "read %s/%v: %v", request.Directory, request.Name, err)
return nil, fmt.Errorf("LookupEntry1: %v", err)
return nil, fmt.Errorf("LookupEntry1: %w", err)
}
if resp.Entry == nil {
return nil, ErrNotFound

View File

@ -41,7 +41,7 @@ func FollowMetadata(filerAddress ServerAddress, grpcDialOption grpc.DialOption,
err := WithFilerClient(true, option.SelfSignature, filerAddress, grpcDialOption, makeSubscribeMetadataFunc(option, processEventFn))
if err != nil {
return fmt.Errorf("subscribing filer meta change: %v", err)
return fmt.Errorf("subscribing filer meta change: %w", err)
}
return err
}
@ -50,7 +50,7 @@ func WithFilerClientFollowMetadata(filerClient filer_pb.FilerClient, option *Met
err := filerClient.WithFilerClient(true, makeSubscribeMetadataFunc(option, processEventFn))
if err != nil {
return fmt.Errorf("subscribing filer meta change: %v", err)
return fmt.Errorf("subscribing filer meta change: %w", err)
}
return nil
@ -72,7 +72,7 @@ func makeSubscribeMetadataFunc(option *MetadataFollowOption, processEventFn Proc
UntilNs: option.StopTsNs,
})
if err != nil {
return fmt.Errorf("subscribe: %v", err)
return fmt.Errorf("subscribe: %w", err)
}
for {

View File

@ -200,7 +200,7 @@ func ParseServerAddress(server string, deltaPort int) (newServerAddress string,
host, port, parseErr := hostAndPort(server)
if parseErr != nil {
return "", fmt.Errorf("server port parse error: %v", parseErr)
return "", fmt.Errorf("server port parse error: %w", parseErr)
}
newPort := int(port) + deltaPort
@ -215,7 +215,7 @@ func hostAndPort(address string) (host string, port uint64, err error) {
}
port, err = strconv.ParseUint(address[colonIndex+1:], 10, 64)
if err != nil {
return "", 0, fmt.Errorf("server port parse error: %v", err)
return "", 0, fmt.Errorf("server port parse error: %w", err)
}
return address[:colonIndex], port, err

View File

@ -56,7 +56,7 @@ func (s gcsRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.
c, err := storage.NewClient(context.Background(), option.WithCredentialsFile(googleApplicationCredentials))
if err != nil {
return nil, fmt.Errorf("failed to create client: %v", err)
return nil, fmt.Errorf("failed to create client: %w", err)
}
client.client = c

View File

@ -42,7 +42,7 @@ func (s AliyunRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stora
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create aliyun session: %v", err)
return nil, fmt.Errorf("create aliyun session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)

View File

@ -37,7 +37,7 @@ func (s BackBlazeRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_st
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create backblaze session: %v", err)
return nil, fmt.Errorf("create backblaze session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)

View File

@ -42,7 +42,7 @@ func (s BaiduRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create baidu session: %v", err)
return nil, fmt.Errorf("create baidu session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)

View File

@ -43,7 +43,7 @@ func (s ContaboRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stor
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create contabo session: %v", err)
return nil, fmt.Errorf("create contabo session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)

View File

@ -43,7 +43,7 @@ func (s FilebaseRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_sto
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create filebase session: %v", err)
return nil, fmt.Errorf("create filebase session: %w", err)
}
sess.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)

View File

@ -48,7 +48,7 @@ func (s s3RemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storage.R
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create aws session: %v", err)
return nil, fmt.Errorf("create aws session: %w", err)
}
if conf.S3V4Signature {
sess.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
@ -108,10 +108,10 @@ func (s *s3RemoteStorageClient) Traverse(remote *remote_pb.RemoteStorageLocation
return true
})
if listErr != nil {
err = fmt.Errorf("list %v: %v", remote, listErr)
err = fmt.Errorf("list %v: %w", remote, listErr)
}
if localErr != nil {
err = fmt.Errorf("process %v: %v", remote, localErr)
err = fmt.Errorf("process %v: %w", remote, localErr)
}
}
return
@ -252,7 +252,7 @@ func (s *s3RemoteStorageClient) DeleteFile(loc *remote_pb.RemoteStorageLocation)
func (s *s3RemoteStorageClient) ListBuckets() (buckets []*remote_storage.Bucket, err error) {
resp, err := s.conn.ListBuckets(&s3.ListBucketsInput{})
if err != nil {
return nil, fmt.Errorf("list buckets: %v", err)
return nil, fmt.Errorf("list buckets: %w", err)
}
for _, b := range resp.Buckets {
buckets = append(buckets, &remote_storage.Bucket{

View File

@ -42,7 +42,7 @@ func (s StorjRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_storag
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create storj session: %v", err)
return nil, fmt.Errorf("create storj session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)

View File

@ -42,7 +42,7 @@ func (s TencentRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stor
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create tencent session: %v", err)
return nil, fmt.Errorf("create tencent session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)

View File

@ -42,7 +42,7 @@ func (s WasabiRemoteStorageMaker) Make(conf *remote_pb.RemoteConf) (remote_stora
sess, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("create wasabi session: %v", err)
return nil, fmt.Errorf("create wasabi session: %w", err)
}
sess.Handlers.Build.PushFront(skipSha256PayloadSigning)
client.conn = s3.New(sess)

View File

@ -84,7 +84,7 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
err = r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, false, message.Signatures)
if err != nil {
return fmt.Errorf("delete old entry %v: %v", key, err)
return fmt.Errorf("delete old entry %v: %w", key, err)
}
glog.V(4).Infof("creating missing %v", key)

View File

@ -94,7 +94,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
uploader, err := operation.NewUploader()
if err != nil {
glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err)
return "", fmt.Errorf("upload data: %v", err)
return "", fmt.Errorf("upload data: %w", err)
}
fileId, uploadResult, err, _ := uploader.UploadWithRetry(
@ -128,7 +128,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
if err != nil {
glog.V(0).Infof("upload source data %v: %v", sourceChunk.GetFileIdString(), err)
return "", fmt.Errorf("upload data: %v", err)
return "", fmt.Errorf("upload data: %w", err)
}
if uploadResult.Error != "" {
glog.V(0).Infof("upload failure %v: %v", filename, err)

View File

@ -118,7 +118,7 @@ func (s3sink *S3Sink) initialize(awsAccessKeyId, awsSecretAccessKey string) erro
sess, err := session.NewSession(config)
if err != nil {
return fmt.Errorf("create aws session: %v", err)
return fmt.Errorf("create aws session: %w", err)
}
s3sink.conn = s3.New(sess)

View File

@ -50,7 +50,7 @@ func (k *AwsSqsInput) initialize(awsAccessKeyId, awsSecretAccessKey, region, que
sess, err := session.NewSession(config)
if err != nil {
return fmt.Errorf("create aws session: %v", err)
return fmt.Errorf("create aws session: %w", err)
}
k.svc = sqs.New(sess)

Some files were not shown because too many files have changed in this diff.