Fix SFTP performance and add SeaweedFS all-in-one deployment (#6792)

* improve performance, fix rclone, and refactor

Signed-off-by: Mohamed Sekour <mohamed.sekour@exfo.com>

* improve download performance + add seaweedfs all-in-one deployment

Signed-off-by: Mohamed Sekour <mohamed.sekour@exfo.com>

* use helper for topologySpreadConstraints and fix creation of SFTP users' home directories

Signed-off-by: Mohamed Sekour <mohamed.sekour@exfo.com>

* fix helm lint

Signed-off-by: Mohamed Sekour <mohamed.sekour@exfo.com>

* add missing ctx param

Signed-off-by: Mohamed Sekour <mohamed.sekour@exfo.com>

---------

Signed-off-by: Mohamed Sekour <mohamed.sekour@exfo.com>
Mohamed Sekour 2025-05-26 09:50:48 +02:00 committed by GitHub
parent ea70d17c5f
commit 27a392f706
GPG Key ID: B5690EEEBB952194
31 changed files with 1174 additions and 808 deletions

View File

@ -197,4 +197,26 @@ or generate a new random password if it doesn't exist.
{{- else -}}
{{- randAlphaNum $length -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- /*
Render a component's topologySpreadConstraints exactly as given in values,
respecting string vs. list, and providing the component name for tpl lookups.
Usage:
{{ include "seaweedfs.topologySpreadConstraints" (dict "Values" .Values "component" "filer") | nindent 8 }}
*/ -}}
{{- define "seaweedfs.topologySpreadConstraints" -}}
{{- $vals := .Values -}}
{{- $comp := .component -}}
{{- $section := index $vals $comp | default dict -}}
{{- $tsp := index $section "topologySpreadConstraints" -}}
{{- with $tsp }}
topologySpreadConstraints:
{{- if kindIs "string" $tsp }}
{{ tpl $tsp (dict "Values" $vals "component" $comp) }}
{{- else }}
{{ toYaml $tsp }}
{{- end }}
{{- end }}
{{- end }}
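
For illustration only (not part of this commit), the helper accepts either form below in values.yaml; the constraint fields are placeholder values:

# String form, passed through tpl, so the template can reference the component name:
filer:
  topologySpreadConstraints: |
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/component: {{ .component }}

# List form, emitted verbatim via toYaml:
volume:
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule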

View File

@ -0,0 +1,427 @@
{{- if .Values.allInOne.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "seaweedfs.name" . }}-all-in-one
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- if .Values.allInOne.annotations }}
annotations:
{{- toYaml .Values.allInOne.annotations | nindent 4 }}
{{- end }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: seaweedfs-all-in-one
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.allInOne.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
annotations:
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.allInOne.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.allInOne.restartPolicy }}
{{- if .Values.allInOne.affinity }}
affinity:
{{ tpl .Values.allInOne.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.allInOne.topologySpreadConstraints }}
{{- include "seaweedfs.topologySpreadConstraints" (dict "Values" .Values "component" "all-in-one") | nindent 6 }}
{{- end }}
{{- if .Values.allInOne.tolerations }}
tolerations:
{{- tpl .Values.allInOne.tolerations . | nindent 8 }}
{{- end }}
{{- include "seaweedfs.imagePullSecrets" . | nindent 6 }}
terminationGracePeriodSeconds: 60
enableServiceLinks: false
{{- if .Values.allInOne.priorityClassName }}
priorityClassName: {{ .Values.allInOne.priorityClassName | quote }}
{{- end }}
{{- if .Values.allInOne.serviceAccountName }}
serviceAccountName: {{ .Values.allInOne.serviceAccountName | quote }}
{{- end }}
{{- if .Values.allInOne.initContainers }}
initContainers:
{{- tpl .Values.allInOne.initContainers . | nindent 8 }}
{{- end }}
{{- if .Values.allInOne.podSecurityContext.enabled }}
securityContext:
{{- omit .Values.allInOne.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
containers:
- name: seaweedfs
image: {{ template "master.image" . }}
imagePullPolicy: {{ default "IfNotPresent" .Values.global.imagePullPolicy }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SEAWEEDFS_FULLNAME
value: "{{ template "seaweedfs.name" . }}"
{{- if .Values.allInOne.extraEnvironmentVars }}
{{- range $key, $value := .Values.allInOne.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 }}
{{- end }}
{{- end }}
{{- end }}
{{- if .Values.global.extraEnvironmentVars }}
{{- range $key, $value := .Values.global.extraEnvironmentVars }}
- name: {{ $key }}
{{- if kindIs "string" $value }}
value: {{ $value | quote }}
{{- else }}
valueFrom:
{{ toYaml $value | nindent 16 }}
{{- end }}
{{- end }}
{{- end }}
command:
- "/bin/sh"
- "-ec"
- |
/usr/bin/weed \
-v={{ .Values.global.loggingLevel }} \
server \
-dir=/data \
-master \
-volume \
-ip=${POD_IP} \
-ip.bind=0.0.0.0 \
{{- if .Values.allInOne.idleTimeout }}
-idleTimeout={{ .Values.allInOne.idleTimeout }} \
{{- end }}
{{- if .Values.allInOne.dataCenter }}
-dataCenter={{ .Values.allInOne.dataCenter }} \
{{- end }}
{{- if .Values.allInOne.rack }}
-rack={{ .Values.allInOne.rack }} \
{{- end }}
{{- if .Values.allInOne.whiteList }}
-whiteList={{ .Values.allInOne.whiteList }} \
{{- end }}
{{- if .Values.allInOne.disableHttp }}
-disableHttp={{ .Values.allInOne.disableHttp }} \
{{- end }}
-master.port={{ .Values.master.port }} \
{{- if .Values.global.enableReplication }}
-master.defaultReplication={{ .Values.global.replicationPlacement }} \
{{- else }}
-master.defaultReplication={{ .Values.master.defaultReplication }} \
{{- end }}
{{- if .Values.master.volumePreallocate }}
-master.volumePreallocate \
{{- end }}
-master.volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
{{- if .Values.master.garbageThreshold }}
-master.garbageThreshold={{ .Values.master.garbageThreshold }} \
{{- end }}
-volume.port={{ .Values.volume.port }} \
-volume.readMode={{ .Values.volume.readMode }} \
{{- if .Values.volume.imagesFixOrientation }}
-volume.images.fix.orientation \
{{- end }}
{{- if .Values.volume.index }}
-volume.index={{ .Values.volume.index }} \
{{- end }}
{{- if .Values.volume.fileSizeLimitMB }}
-volume.fileSizeLimitMB={{ .Values.volume.fileSizeLimitMB }} \
{{- end }}
-volume.minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \
-volume.compactionMBps={{ .Values.volume.compactionMBps }} \
{{- if .Values.allInOne.metricsPort }}
-metricsPort={{ .Values.allInOne.metricsPort }} \
{{- else if .Values.master.metricsPort }}
-metricsPort={{ .Values.master.metricsPort }} \
{{- end }}
-filer \
-filer.port={{ .Values.filer.port }} \
{{- if .Values.filer.disableDirListing }}
-filer.disableDirListing \
{{- end }}
-filer.dirListLimit={{ .Values.filer.dirListLimit }} \
{{- if .Values.global.enableReplication }}
-filer.defaultReplicaPlacement={{ .Values.global.replicationPlacement }} \
{{- else }}
-filer.defaultReplicaPlacement={{ .Values.filer.defaultReplicaPlacement }} \
{{- end }}
{{- if .Values.filer.maxMB }}
-filer.maxMB={{ .Values.filer.maxMB }} \
{{- end }}
{{- if .Values.filer.encryptVolumeData }}
-filer.encryptVolumeData \
{{- end }}
{{- if .Values.filer.filerGroup}}
-filer.filerGroup={{ .Values.filer.filerGroup}} \
{{- end }}
{{- if .Values.filer.rack }}
-filer.rack={{ .Values.filer.rack }} \
{{- end }}
{{- if .Values.filer.dataCenter }}
-filer.dataCenter={{ .Values.filer.dataCenter }} \
{{- end }}
{{- if .Values.allInOne.s3.enabled }}
-s3 \
-s3.port={{ .Values.s3.port }} \
{{- if .Values.s3.domainName }}
-s3.domainName={{ .Values.s3.domainName }} \
{{- end }}
{{- if .Values.global.enableSecurity }}
{{- if .Values.s3.httpsPort }}
-s3.port.https={{ .Values.s3.httpsPort }} \
{{- end }}
-s3.cert.file=/usr/local/share/ca-certificates/client/tls.crt \
-s3.key.file=/usr/local/share/ca-certificates/client/tls.key \
{{- end }}
{{- if eq (typeOf .Values.s3.allowEmptyFolder) "bool" }}
-s3.allowEmptyFolder={{ .Values.s3.allowEmptyFolder }} \
{{- end }}
{{- if .Values.s3.enableAuth }}
-s3.config=/etc/sw/s3/seaweedfs_s3_config \
{{- end }}
{{- if .Values.s3.auditLogConfig }}
-s3.auditLogConfig=/etc/sw/s3/s3_auditLogConfig.json \
{{- end }}
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
-sftp \
-sftp.port={{ .Values.sftp.port }} \
{{- if .Values.sftp.sshPrivateKey }}
-sftp.sshPrivateKey={{ .Values.sftp.sshPrivateKey }} \
{{- end }}
{{- if .Values.sftp.hostKeysFolder }}
-sftp.hostKeysFolder={{ .Values.sftp.hostKeysFolder }} \
{{- end }}
{{- if .Values.sftp.authMethods }}
-sftp.authMethods={{ .Values.sftp.authMethods }} \
{{- end }}
{{- if .Values.sftp.maxAuthTries }}
-sftp.maxAuthTries={{ .Values.sftp.maxAuthTries }} \
{{- end }}
{{- if .Values.sftp.bannerMessage }}
-sftp.bannerMessage="{{ .Values.sftp.bannerMessage }}" \
{{- end }}
{{- if .Values.sftp.loginGraceTime }}
-sftp.loginGraceTime={{ .Values.sftp.loginGraceTime }} \
{{- end }}
{{- if .Values.sftp.clientAliveInterval }}
-sftp.clientAliveInterval={{ .Values.sftp.clientAliveInterval }} \
{{- end }}
{{- if .Values.sftp.clientAliveCountMax }}
-sftp.clientAliveCountMax={{ .Values.sftp.clientAliveCountMax }} \
{{- end }}
-sftp.userStoreFile=/etc/sw/sftp/seaweedfs_sftp_config \
{{- end }}
volumeMounts:
- name: data
mountPath: /data
{{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
- name: config-s3-users
mountPath: /etc/sw/s3
readOnly: true
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
- name: config-ssh
mountPath: /etc/sw/ssh
readOnly: true
- mountPath: /etc/sw/sftp
name: config-users
readOnly: true
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config
mountPath: /etc/seaweedfs/notification.toml
subPath: notification.toml
readOnly: true
{{- end }}
- name: master-config
mountPath: /etc/seaweedfs/master.toml
subPath: master.toml
readOnly: true
{{- if .Values.global.enableSecurity }}
- name: security-config
mountPath: /etc/seaweedfs/security.toml
subPath: security.toml
readOnly: true
- name: ca-cert
mountPath: /usr/local/share/ca-certificates/ca/
readOnly: true
- name: master-cert
mountPath: /usr/local/share/ca-certificates/master/
readOnly: true
- name: volume-cert
mountPath: /usr/local/share/ca-certificates/volume/
readOnly: true
- name: filer-cert
mountPath: /usr/local/share/ca-certificates/filer/
readOnly: true
- name: client-cert
mountPath: /usr/local/share/ca-certificates/client/
readOnly: true
{{- end }}
{{ tpl .Values.allInOne.extraVolumeMounts . | nindent 12 }}
ports:
- containerPort: {{ .Values.master.port }}
name: swfs-mas
- containerPort: {{ .Values.master.grpcPort }}
name: swfs-mas-grpc
- containerPort: {{ .Values.volume.port }}
name: swfs-vol
- containerPort: {{ .Values.volume.grpcPort }}
name: swfs-vol-grpc
- containerPort: {{ .Values.filer.port }}
name: swfs-fil
- containerPort: {{ .Values.filer.grpcPort }}
name: swfs-fil-grpc
{{- if .Values.allInOne.s3.enabled }}
- containerPort: {{ .Values.s3.port }}
name: swfs-s3
{{- if .Values.s3.httpsPort }}
- containerPort: {{ .Values.s3.httpsPort }}
name: swfs-s3-tls
{{- end }}
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
- containerPort: {{ .Values.sftp.port }}
name: swfs-sftp
{{- end }}
{{- if .Values.allInOne.metricsPort }}
- containerPort: {{ .Values.allInOne.metricsPort }}
name: server-metrics
{{- end }}
{{- if .Values.allInOne.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: {{ .Values.allInOne.readinessProbe.httpGet.path }}
port: {{ .Values.master.port }}
scheme: {{ .Values.allInOne.readinessProbe.scheme }}
initialDelaySeconds: {{ .Values.allInOne.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.allInOne.readinessProbe.periodSeconds }}
successThreshold: {{ .Values.allInOne.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.allInOne.readinessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.allInOne.readinessProbe.timeoutSeconds }}
{{- end }}
{{- if .Values.allInOne.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: {{ .Values.allInOne.livenessProbe.httpGet.path }}
port: {{ .Values.master.port }}
scheme: {{ .Values.allInOne.livenessProbe.scheme }}
initialDelaySeconds: {{ .Values.allInOne.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.allInOne.livenessProbe.periodSeconds }}
successThreshold: {{ .Values.allInOne.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.allInOne.livenessProbe.failureThreshold }}
timeoutSeconds: {{ .Values.allInOne.livenessProbe.timeoutSeconds }}
{{- end }}
{{- with .Values.allInOne.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.allInOne.containerSecurityContext.enabled }}
securityContext:
{{- omit .Values.allInOne.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.allInOne.sidecars }}
{{- include "common.tplvalues.render" (dict "value" .Values.allInOne.sidecars "context" $) | nindent 8 }}
{{- end }}
volumes:
- name: data
{{- if eq .Values.allInOne.data.type "hostPath" }}
hostPath:
path: {{ .Values.allInOne.data.hostPathPrefix }}/seaweedfs-all-in-one-data/
type: DirectoryOrCreate
{{- else if eq .Values.allInOne.data.type "persistentVolumeClaim" }}
persistentVolumeClaim:
claimName: {{ .Values.allInOne.data.claimName }}
{{- else if eq .Values.allInOne.data.type "emptyDir" }}
emptyDir: {}
{{- end }}
{{- if and .Values.allInOne.s3.enabled (or .Values.s3.enableAuth .Values.filer.s3.enableAuth) }}
- name: config-s3-users
secret:
defaultMode: 420
secretName: {{ default (printf "%s-s3-secret" (include "seaweedfs.name" .)) (or .Values.s3.existingConfigSecret .Values.filer.s3.existingConfigSecret) }}
{{- end }}
{{- if .Values.allInOne.sftp.enabled }}
- name: config-ssh
secret:
defaultMode: 420
secretName: {{ default (printf "%s-sftp-ssh-secret" (include "seaweedfs.name" .)) .Values.sftp.existingSshConfigSecret }}
- name: config-users
secret:
defaultMode: 420
secretName: {{ default (printf "%s-sftp-secret" (include "seaweedfs.name" .)) .Values.sftp.existingConfigSecret }}
{{- end }}
{{- if .Values.filer.notificationConfig }}
- name: notification-config
configMap:
name: {{ template "seaweedfs.name" . }}-notification-config
{{- end }}
- name: master-config
configMap:
name: {{ template "seaweedfs.name" . }}-master-config
{{- if .Values.global.enableSecurity }}
- name: security-config
configMap:
name: {{ template "seaweedfs.name" . }}-security-config
- name: ca-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-ca-cert
- name: master-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-master-cert
- name: volume-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-volume-cert
- name: filer-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-filer-cert
- name: client-cert
secret:
secretName: {{ template "seaweedfs.name" . }}-client-cert
{{- end }}
{{ tpl .Values.allInOne.extraVolumes . | nindent 8 }}
{{- if .Values.allInOne.nodeSelector }}
nodeSelector:
{{ tpl .Values.allInOne.nodeSelector . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,21 @@
{{- if and .Values.allInOne.enabled (eq .Values.allInOne.data.type "persistentVolumeClaim") }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.allInOne.data.claimName }}
labels:
app.kubernetes.io/component: seaweedfs-all-in-one
{{- if .Values.allInOne.annotations }}
annotations:
{{- toYaml .Values.allInOne.annotations | nindent 4 }}
{{- end }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.allInOne.data.size }}
{{- if .Values.allInOne.data.storageClass }}
storageClassName: {{ .Values.allInOne.data.storageClass }}
{{- end }}
{{- end }}
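
A minimal values sketch to exercise this PVC template (the size and storage class are illustrative, not defaults shipped by this commit):

allInOne:
  enabled: true
  data:
    type: "persistentVolumeClaim"
    claimName: seaweedfs-data-pvc
    size: 100Gi
    storageClass: standard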

View File

@ -0,0 +1,83 @@
{{- if .Values.allInOne.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "seaweedfs.name" . }}-all-in-one
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- if .Values.allInOne.service.annotations }}
annotations:
{{- toYaml .Values.allInOne.service.annotations | nindent 4 }}
{{- end }}
spec:
internalTrafficPolicy: {{ .Values.allInOne.service.internalTrafficPolicy | default "Cluster" }}
ports:
# Master ports
- name: "swfs-master"
port: {{ .Values.master.port }}
targetPort: {{ .Values.master.port }}
protocol: TCP
- name: "swfs-master-grpc"
port: {{ .Values.master.grpcPort }}
targetPort: {{ .Values.master.grpcPort }}
protocol: TCP
# Volume ports
- name: "swfs-volume"
port: {{ .Values.volume.port }}
targetPort: {{ .Values.volume.port }}
protocol: TCP
- name: "swfs-volume-grpc"
port: {{ .Values.volume.grpcPort }}
targetPort: {{ .Values.volume.grpcPort }}
protocol: TCP
# Filer ports
- name: "swfs-filer"
port: {{ .Values.filer.port }}
targetPort: {{ .Values.filer.port }}
protocol: TCP
- name: "swfs-filer-grpc"
port: {{ .Values.filer.grpcPort }}
targetPort: {{ .Values.filer.grpcPort }}
protocol: TCP
# S3 ports (if enabled)
{{- if .Values.allInOne.s3.enabled }}
- name: "swfs-s3"
port: {{ .Values.s3.port }}
targetPort: {{ .Values.s3.port }}
protocol: TCP
{{- if .Values.s3.httpsPort }}
- name: "swfs-s3-tls"
port: {{ .Values.s3.httpsPort }}
targetPort: {{ .Values.s3.httpsPort }}
protocol: TCP
{{- end }}
{{- end }}
# SFTP ports (if enabled)
{{- if .Values.allInOne.sftp.enabled }}
- name: "swfs-sftp"
port: {{ .Values.sftp.port }}
targetPort: {{ .Values.sftp.port }}
protocol: TCP
{{- end }}
# Server metrics port (single metrics endpoint for all services)
{{- if .Values.allInOne.metricsPort }}
- name: "server-metrics"
port: {{ .Values.allInOne.metricsPort }}
targetPort: {{ .Values.allInOne.metricsPort }}
protocol: TCP
{{- end }}
selector:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- end }}

View File

@ -0,0 +1,29 @@
{{- if .Values.allInOne.enabled }}
{{- if .Values.global.monitoring.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "seaweedfs.name" . }}-all-in-one
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: all-in-one
{{- with .Values.global.monitoring.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
endpoints:
{{- if .Values.allInOne.metricsPort }}
- interval: 30s
port: server-metrics
scrapeTimeout: 5s
{{- end }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/component: seaweedfs-all-in-one
{{- end }}
{{- end }}
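
This ServiceMonitor only renders when allInOne.enabled, global.monitoring.enabled, and allInOne.metricsPort are all set, and it assumes the Prometheus Operator CRDs are installed in the cluster. A hedged values sketch (the release label is illustrative and must match your Prometheus Operator's serviceMonitorSelector):

global:
  monitoring:
    enabled: true
    additionalLabels:
      release: prometheus
allInOne:
  enabled: true
  metricsPort: 9324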

View File

@ -9,11 +9,13 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: objectstorage-provisioner
spec:
replicas: {{ .Values.cosi.replicas }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: objectstorage-provisioner
template:
@ -38,6 +40,13 @@ spec:
{{- end }}
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.cosi.restartPolicy }}
{{- if .Values.cosi.affinity }}
affinity:
{{ tpl .Values.cosi.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.cosi.topologySpreadConstraints }}
{{- include "seaweedfs.topologySpreadConstraints" (dict "Values" .Values "component" "objectstorage-provisioner") | nindent 6 }}
{{- end }}
{{- if .Values.cosi.tolerations }}
tolerations:
{{ tpl .Values.cosi.tolerations . | nindent 8 | trim }}

View File

@ -61,9 +61,9 @@ spec:
affinity:
{{ tpl .Values.filer.affinity . | nindent 8 | trim }}
{{- end }}
{{- with .Values.filer.topologySpreadConstraints }}
{{- if .Values.filer.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- include "seaweedfs.topologySpreadConstraints" (dict "Values" .Values "component" "filer") | nindent 6 }}
{{- end }}
{{- if .Values.filer.tolerations }}
tolerations:

View File

@ -1,4 +1,4 @@
{{- if .Values.master.enabled }}
{{- if or .Values.master.enabled .Values.allInOne.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:

View File

@ -9,6 +9,7 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
{{- if .Values.master.annotations }}
annotations:
{{- toYaml .Values.master.annotations | nindent 4 }}
@ -54,9 +55,8 @@ spec:
affinity:
{{ tpl .Values.master.affinity . | nindent 8 | trim }}
{{- end }}
{{- with .Values.master.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- if .Values.master.topologySpreadConstraints }}
{{- include "seaweedfs.topologySpreadConstraints" (dict "Values" .Values "component" "master") | nindent 6 }}
{{- end }}
{{- if .Values.master.tolerations }}
tolerations:

View File

@ -9,6 +9,7 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: s3
{{- if .Values.s3.annotations }}
annotations:
{{- toYaml .Values.s3.annotations | nindent 4 }}
@ -42,6 +43,13 @@ spec:
{{- end }}
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.s3.restartPolicy }}
{{- if .Values.s3.affinity }}
affinity:
{{ tpl .Values.s3.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.s3.topologySpreadConstraints }}
{{- include "seaweedfs.topologySpreadConstraints" (dict "Values" .Values "component" "s3") | nindent 6 }}
{{- end }}
{{- if .Values.s3.tolerations }}
tolerations:
{{ tpl .Values.s3.tolerations . | nindent 8 | trim }}

View File

@ -1,4 +1,4 @@
{{- if or (and .Values.filer.s3.enabled .Values.filer.s3.enableAuth (not .Values.filer.s3.existingConfigSecret)) (and .Values.s3.enabled .Values.s3.enableAuth (not .Values.s3.existingConfigSecret)) }}
{{- if or (and (or .Values.s3.enabled .Values.allInOne.enabled) .Values.s3.enableAuth (not .Values.s3.existingConfigSecret)) (and .Values.filer.s3.enabled .Values.filer.s3.enableAuth (not .Values.filer.s3.existingConfigSecret)) }}
{{- $access_key_admin := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "admin_access_key_id" "length" 20) -}}
{{- $secret_key_admin := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "admin_secret_access_key" "length" 40) -}}
{{- $access_key_read := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-s3-secret" "key" "read_access_key_id" "length" 20) -}}

View File

@ -1,20 +1,19 @@
{{- if .Values.global.monitoring.enabled }}
{{- $files := .Files.Glob "dashboards/*.json" }}
{{- if $files }}
apiVersion: v1
kind: ConfigMapList
items:
{{- range $path, $fileContents := $files }}
{{- range $path, $file := $files }}
{{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }}
- apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s" $dashboardName | lower | replace "_" "-" }}
namespace: {{ $.Release.Namespace }}
labels:
grafana_dashboard: "1"
data:
{{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ printf "%s" $dashboardName | lower | replace "_" "-" }}
namespace: {{ $.Release.Namespace }}
labels:
grafana_dashboard: "1"
data:
{{ $dashboardName }}.json: |-
{{ toString $file | indent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -9,6 +9,7 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: sftp
{{- if .Values.sftp.annotations }}
annotations:
{{- toYaml .Values.sftp.annotations | nindent 4 }}
@ -42,6 +43,13 @@ spec:
{{- end }}
spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.sftp.restartPolicy }}
{{- if .Values.sftp.affinity }}
affinity:
{{ tpl .Values.sftp.affinity . | nindent 8 | trim }}
{{- end }}
{{- if .Values.sftp.topologySpreadConstraints }}
{{- include "seaweedfs.topologySpreadConstraints" (dict "Values" .Values "component" "sftp") | nindent 6 }}
{{- end }}
{{- if .Values.sftp.tolerations }}
tolerations:
{{ tpl .Values.sftp.tolerations . | nindent 8 | trim }}

View File

@ -1,4 +1,4 @@
{{- if .Values.sftp.enabled }}
{{- if or .Values.sftp.enabled .Values.allInOne.enabled }}
{{- $admin_pwd := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-sftp-secret" "key" "admin_password" "length" 20) -}}
{{- $read_user_pwd := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-sftp-secret" "key" "readonly_password" "length" 20) -}}
{{- $public_user_pwd := include "getOrGeneratePassword" (dict "namespace" .Release.Namespace "secretName" "seaweedfs-sftp-secret" "key" "public_user_password" "length" 20) -}}

View File

@ -14,24 +14,17 @@ metadata:
{{- toYaml .Values.sftp.annotations | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.sftp.service.type | default "ClusterIP" }}
internalTrafficPolicy: {{ .Values.sftp.internalTrafficPolicy | default "Cluster" }}
ports:
- name: "swfs-sftp"
port: {{ .Values.sftp.port }}
targetPort: {{ .Values.sftp.port }}
protocol: TCP
{{- if and (eq (.Values.sftp.service.type | default "ClusterIP") "NodePort") .Values.sftp.service.nodePort }}
nodePort: {{ .Values.sftp.service.nodePort }}
{{- end }}
{{- if .Values.sftp.metricsPort }}
- name: "metrics"
port: {{ .Values.sftp.metricsPort }}
targetPort: {{ .Values.sftp.metricsPort }}
protocol: TCP
{{- if and (eq (.Values.sftp.service.type | default "ClusterIP") "NodePort") .Values.sftp.service.metricsNodePort }}
nodePort: {{ .Values.sftp.service.metricsNodePort }}
{{- end }}
{{- end }}
selector:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}

View File

@ -9,6 +9,7 @@ metadata:
helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: volume
{{- if .Values.volume.annotations }}
annotations:
{{- toYaml .Values.volume.annotations | nindent 4 }}
@ -47,9 +48,8 @@ spec:
affinity:
{{ tpl .Values.volume.affinity . | nindent 8 | trim }}
{{- end }}
{{- with .Values.volume.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- if .Values.volume.topologySpreadConstraints }}
{{- include "seaweedfs.topologySpreadConstraints" (dict "Values" .Values "component" "volume") | nindent 6 }}
{{- end }}
restartPolicy: {{ default .Values.global.restartPolicy .Values.volume.restartPolicy }}
{{- if .Values.volume.tolerations }}

View File

@ -173,7 +173,7 @@ master:
# Topology Spread Constraints Settings
# This should map directly to the value of the topologySpreadConstraints
# for a PodSpec. By Default no constraints are set.
topologySpreadConstraints: {}
topologySpreadConstraints: null
# Toleration Settings for master pods
# This should be a multi-line string matching the Toleration array
@ -436,7 +436,7 @@ volume:
# Topology Spread Constraints Settings
# This should map directly to the value of the topologySpreadConstraints
# for a PodSpec. By Default no constraints are set.
topologySpreadConstraints: {}
topologySpreadConstraints: null
# Resource requests, limits, etc. for the server cluster placement. This
# should map directly to the value of the resources field for a PodSpec,
@ -655,7 +655,7 @@ filer:
# Topology Spread Constraints Settings
# This should map directly to the value of the topologySpreadConstraints
# for a PodSpec. By Default no constraints are set.
topologySpreadConstraints: {}
topologySpreadConstraints: null
# updatePartition is used to control a careful rolling update of SeaweedFS
# masters.
@ -949,6 +949,7 @@ s3:
# additional ingress annotations for the s3 endpoint
annotations: {}
tls: []
sftp:
enabled: false
imageOverride: null
@ -958,10 +959,6 @@ sftp:
port: 2022 # Default SFTP port
metricsPort: 9327
metricsIp: "" # If empty, defaults to bindAddress
service:
type: ClusterIP # Can be ClusterIP, NodePort, LoadBalancer
nodePort: null # Optional: specific nodePort for SFTP
metricsNodePort: null # Optional: specific nodePort for metrics
loggingOverrideLevel: null
# SSH server configuration
@ -1025,6 +1022,143 @@ sftp:
successThreshold: 1
failureThreshold: 100
timeoutSeconds: 10
# All-in-one deployment configuration
allInOne:
enabled: false
imageOverride: null
restartPolicy: Always
replicas: 1
# Core configuration
idleTimeout: 30 # Connection idle timeout in seconds
dataCenter: "" # Current volume server's data center name
rack: "" # Current volume server's rack name
whiteList: "" # Comma separated IP addresses having write permission
disableHttp: false # Disable HTTP requests, only gRPC operations are allowed
metricsPort: 9324 # Prometheus metrics listen port
metricsIp: "" # Metrics listen IP. If empty, defaults to bindAddress
loggingOverrideLevel: null # Override logging level
# Service configuration
s3:
enabled: false # Whether to enable S3 gateway
sftp:
enabled: false # Whether to enable SFTP server
# Service settings
service:
annotations: {} # Annotations for the service
type: ClusterIP # Service type (ClusterIP, NodePort, LoadBalancer)
# Storage configuration
data:
type: "emptyDir" # Options: "hostPath", "persistentVolumeClaim", "emptyDir"
hostPathPrefix: /mnt/data # Path prefix for hostPath volumes
claimName: seaweedfs-data-pvc # Name of the PVC to use
size: "" # Size of the PVC
storageClass: "" # Storage class for the PVC
# Health checks
readinessProbe:
enabled: true
httpGet:
path: /cluster/status
port: 9333
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 15
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
livenessProbe:
enabled: true
httpGet:
path: /cluster/status
port: 9333
scheme: HTTP
initialDelaySeconds: 20
periodSeconds: 30
successThreshold: 1
failureThreshold: 5
timeoutSeconds: 5
# Additional resources
extraEnvironmentVars: {} # Additional environment variables
extraVolumeMounts: "" # Additional volume mounts
extraVolumes: "" # Additional volumes
initContainers: "" # Init containers
sidecars: "" # Sidecar containers
annotations: {} # Annotations for the deployment
podAnnotations: {} # Annotations for the pods
podLabels: {} # Labels for the pods
# Scheduling configuration
# Affinity Settings
# Commenting out or setting as empty the affinity variable, will allow
# deployment to single node services such as Minikube
affinity: |
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: {{ template "seaweedfs.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: master
topologyKey: kubernetes.io/hostname
# Topology Spread Constraints Settings
# This should map directly to the value of the topologySpreadConstraints
# for a PodSpec. By Default no constraints are set.
topologySpreadConstraints: null
# Toleration Settings for the all-in-one pod
# This should be a multi-line string matching the Toleration array
# in a PodSpec.
tolerations: ""
# nodeSelector labels for all-in-one pod assignment, formatted as a multi-line string.
# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: |
kubernetes.io/arch: amd64
# Used to assign priority to the all-in-one pod
# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
priorityClassName: ""
# Used to assign a service account.
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccountName: ""
# Configure security context for Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# podSecurityContext:
# enabled: true
# runAsUser: 1000
# runAsGroup: 3000
# fsGroup: 2000
podSecurityContext: {}
# Configure security context for Container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
# Example:
# containerSecurityContext:
# enabled: true
# runAsUser: 2000
# allowPrivilegeEscalation: false
containerSecurityContext: {}
# Resource management
resources:
limits:
cpu: "2"
memory: "2Gi"
requests:
cpu: "500m"
memory: "1Gi"
# Deploy Kubernetes COSI Driver for SeaweedFS
# Requires COSI CRDs and controller to be installed in the cluster
# For more information, visit: https://container-object-storage-interface.github.io/docs/deployment-guide
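
Following the allInOne values above, a rough sketch (not shipped defaults) of a minimal override enabling the all-in-one pod with S3 and SFTP while turning off the standalone components; it assumes the chart's usual per-component enabled flags (master/volume/filer are standard chart keys, not shown in this diff):

allInOne:
  enabled: true
  s3:
    enabled: true
  sftp:
    enabled: true
master:
  enabled: false
volume:
  enabled: false
filer:
  enabled: false
s3:
  enabled: false
sftp:
  enabled: false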

View File

@ -17,12 +17,11 @@ type Manager struct {
userStore user.Store
passwordAuth *PasswordAuthenticator
publicKeyAuth *PublicKeyAuthenticator
permissionChecker *PermissionChecker
enabledAuthMethods []string
}
// NewManager creates a new authentication manager
func NewManager(userStore user.Store, fsHelper FileSystemHelper, enabledAuthMethods []string) *Manager {
func NewManager(userStore user.Store, enabledAuthMethods []string) *Manager {
manager := &Manager{
userStore: userStore,
enabledAuthMethods: enabledAuthMethods,
@ -43,7 +42,6 @@ func NewManager(userStore user.Store, fsHelper FileSystemHelper, enabledAuthMeth
manager.passwordAuth = NewPasswordAuthenticator(userStore, passwordEnabled)
manager.publicKeyAuth = NewPublicKeyAuthenticator(userStore, publicKeyEnabled)
manager.permissionChecker = NewPermissionChecker(fsHelper)
return manager
}
@ -65,11 +63,6 @@ func (m *Manager) GetSSHServerConfig() *ssh.ServerConfig {
return config
}
// CheckPermission checks if a user has the required permission on a path
func (m *Manager) CheckPermission(user *user.User, path, permission string) error {
return m.permissionChecker.CheckFilePermission(user, path, permission)
}
// GetUser retrieves a user from the user store
func (m *Manager) GetUser(username string) (*user.User, error) {
return m.userStore.GetUser(username)

View File

@ -51,14 +51,3 @@ func (a *PasswordAuthenticator) Authenticate(conn ssh.ConnMetadata, password []b
return nil, fmt.Errorf("authentication failed")
}
// ValidatePassword checks if the provided password is valid for the user
func ValidatePassword(store user.Store, username string, password []byte) bool {
user, err := store.GetUser(username)
if err != nil {
return false
}
// Compare plaintext password
return string(password) == user.Password
}

View File

@ -1,7 +1,6 @@
package auth
import (
"crypto/subtle"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/sftpd/user"
@ -40,7 +39,7 @@ func (a *PublicKeyAuthenticator) Authenticate(conn ssh.ConnMetadata, key ssh.Pub
keyData := string(key.Marshal())
// Validate public key
if ValidatePublicKey(a.userStore, username, keyData) {
if a.userStore.ValidatePublicKey(username, keyData) {
return &ssh.Permissions{
Extensions: map[string]string{
"username": username,
@ -50,19 +49,3 @@ func (a *PublicKeyAuthenticator) Authenticate(conn ssh.ConnMetadata, key ssh.Pub
return nil, fmt.Errorf("authentication failed")
}
// ValidatePublicKey checks if the provided public key is valid for the user
func ValidatePublicKey(store user.Store, username string, keyData string) bool {
user, err := store.GetUser(username)
if err != nil {
return false
}
for _, key := range user.PublicKeys {
if subtle.ConstantTimeCompare([]byte(key), []byte(keyData)) == 1 {
return true
}
}
return false
}

View File

@ -0,0 +1,99 @@
package sftpd
import (
"fmt"
"io"
"sync"
"github.com/seaweedfs/seaweedfs/weed/filer"
filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/sftpd/utils"
)
type SeaweedFileReaderAt struct {
fs *SftpServer
entry *filer_pb.Entry
reader io.ReadSeeker
mu sync.Mutex
bufferSize int
cache *utils.LruCache
fileSize int64
}
func NewSeaweedFileReaderAt(fs *SftpServer, entry *filer_pb.Entry) *SeaweedFileReaderAt {
return &SeaweedFileReaderAt{
fs: fs,
entry: entry,
bufferSize: 5 * 1024 * 1024, // 5MB
cache: utils.NewLRUCache(10), // Max 10 chunks = ~50MB
fileSize: int64(entry.Attributes.FileSize),
}
}
func (ra *SeaweedFileReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
ra.mu.Lock()
defer ra.mu.Unlock()
if off >= ra.fileSize {
return 0, io.EOF
}
remaining := len(p)
readOffset := off
totalRead := 0
for remaining > 0 && readOffset < ra.fileSize {
bufferKey := (readOffset / int64(ra.bufferSize)) * int64(ra.bufferSize)
bufferOffset := int(readOffset - bufferKey)
buffer, ok := ra.cache.Get(bufferKey)
if !ok {
readSize := ra.bufferSize
if bufferKey+int64(readSize) > ra.fileSize {
readSize = int(ra.fileSize - bufferKey)
}
if ra.reader == nil {
r := filer.NewFileReader(ra.fs, ra.entry)
if rs, ok := r.(io.ReadSeeker); ok {
ra.reader = rs
} else {
return 0, fmt.Errorf("reader is not seekable")
}
}
if _, err := ra.reader.Seek(bufferKey, io.SeekStart); err != nil {
return 0, fmt.Errorf("seek error: %v", err)
}
buffer = make([]byte, readSize)
readBytes, err := io.ReadFull(ra.reader, buffer)
if err != nil && err != io.ErrUnexpectedEOF {
return 0, fmt.Errorf("read error: %v", err)
}
buffer = buffer[:readBytes]
ra.cache.Put(bufferKey, buffer)
}
toCopy := len(buffer) - bufferOffset
if toCopy > remaining {
toCopy = remaining
}
if toCopy <= 0 {
break
}
copy(p[totalRead:], buffer[bufferOffset:bufferOffset+toCopy])
totalRead += toCopy
readOffset += int64(toCopy)
remaining -= toCopy
}
if totalRead == 0 {
return 0, io.EOF
}
if totalRead < len(p) {
return totalRead, io.EOF
}
return totalRead, nil
}

View File

@ -8,8 +8,6 @@ import (
"time"
"github.com/pkg/sftp"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util"
)
// FileInfo implements os.FileInfo.
@ -70,57 +68,43 @@ func (l listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) {
return n, nil
}
// filerFileWriter buffers writes and flushes on Close.
type filerFileWriter struct {
// SeaweedSftpFileWriter buffers writes and flushes on Close.
type SeaweedSftpFileWriter struct {
fs SftpServer
req *sftp.Request
mu sync.Mutex
data []byte
tmpFile *os.File
permissions os.FileMode
uid uint32
gid uint32
offset int64
}
func (w *filerFileWriter) Write(p []byte) (int, error) {
func (w *SeaweedSftpFileWriter) Write(p []byte) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
end := w.offset + int64(len(p))
if end > int64(len(w.data)) {
newBuf := make([]byte, end)
copy(newBuf, w.data)
w.data = newBuf
}
n := copy(w.data[w.offset:], p)
n, err := w.tmpFile.WriteAt(p, w.offset)
w.offset += int64(n)
return n, nil
return n, err
}
func (w *filerFileWriter) WriteAt(p []byte, off int64) (int, error) {
func (w *SeaweedSftpFileWriter) WriteAt(p []byte, off int64) (int, error) {
w.mu.Lock()
defer w.mu.Unlock()
end := int(off) + len(p)
if end > len(w.data) {
newBuf := make([]byte, end)
copy(newBuf, w.data)
w.data = newBuf
}
n := copy(w.data[off:], p)
return n, nil
return w.tmpFile.WriteAt(p, off)
}
func (w *filerFileWriter) Close() error {
func (w *SeaweedSftpFileWriter) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
dir, _ := util.FullPath(w.req.Filepath).DirAndName()
defer os.Remove(w.tmpFile.Name()) // Clean up temp file
defer w.tmpFile.Close()
// Check permissions based on file metadata and user permissions
if err := w.fs.checkFilePermission(dir, "write"); err != nil {
glog.Errorf("Permission denied for %s", dir)
if _, err := w.tmpFile.Seek(0, io.SeekStart); err != nil {
return err
}
// Call the extracted putFile method on SftpServer
return w.fs.putFile(w.req.Filepath, w.data, w.fs.user)
// Stream the file instead of loading it
return w.fs.putFile(w.req.Filepath, w.tmpFile, w.fs.user)
}

View File

@ -2,7 +2,6 @@
package sftpd
import (
"bytes"
"context"
"crypto/md5"
"encoding/json"
@ -15,7 +14,6 @@ import (
"time"
"github.com/pkg/sftp"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@ -48,6 +46,9 @@ func (fs *SftpServer) getEntry(p string) (*filer_pb.Entry, error) {
err := fs.callWithClient(false, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
r, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{Directory: dir, Name: name})
if err != nil {
if isNotExistError(err) {
return os.ErrNotExist
}
return err
}
if r.Entry == nil {
@ -57,11 +58,21 @@ func (fs *SftpServer) getEntry(p string) (*filer_pb.Entry, error) {
return nil
})
if err != nil {
if isNotExistError(err) {
return nil, os.ErrNotExist
}
return nil, fmt.Errorf("lookup %s: %w", p, err)
}
return entry, nil
}
func isNotExistError(err error) bool {
return strings.Contains(err.Error(), "not found") ||
strings.Contains(err.Error(), "no entry is found") ||
strings.Contains(err.Error(), "file does not exist") ||
err == os.ErrNotExist
}
// updateEntry sends an UpdateEntryRequest for the given entry.
func (fs *SftpServer) updateEntry(dir string, entry *filer_pb.Entry) error {
return fs.callWithClient(false, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
@ -116,94 +127,30 @@ func (fs *SftpServer) readFile(r *sftp.Request) (io.ReaderAt, error) {
if err != nil {
return nil, err
}
return &SeaweedFileReaderAt{fs: fs, entry: entry}, nil
}
// putFile uploads a file to the filer and sets ownership metadata.
func (fs *SftpServer) putFile(filepath string, data []byte, user *user.User) error {
dir, filename := util.FullPath(filepath).DirAndName()
uploadUrl := fmt.Sprintf("http://%s%s", fs.filerAddr, filepath)
// Create a reader from our buffered data and calculate MD5 hash
hash := md5.New()
reader := bytes.NewReader(data)
body := io.TeeReader(reader, hash)
fileSize := int64(len(data))
// Create and execute HTTP request
proxyReq, err := http.NewRequest(http.MethodPut, uploadUrl, body)
if err != nil {
return fmt.Errorf("create request: %v", err)
}
proxyReq.ContentLength = fileSize
proxyReq.Header.Set("Content-Type", "application/octet-stream")
client := &http.Client{}
resp, err := client.Do(proxyReq)
if err != nil {
return fmt.Errorf("upload to filer: %v", err)
}
defer resp.Body.Close()
// Process response
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("read response: %v", err)
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(respBody))
}
var result weed_server.FilerPostResult
if err := json.Unmarshal(respBody, &result); err != nil {
return fmt.Errorf("parse response: %v", err)
}
if result.Error != "" {
return fmt.Errorf("filer error: %s", result.Error)
}
// Update file ownership using the same pattern as other functions
if user != nil {
err := fs.callWithClient(false, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
// Look up the file to get its current entry
lookupResp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
Name: filename,
})
if err != nil {
return fmt.Errorf("lookup file for attribute update: %v", err)
}
if lookupResp.Entry == nil {
return fmt.Errorf("file not found after upload: %s/%s", dir, filename)
}
// Update the entry with new uid/gid
entry := lookupResp.Entry
entry.Attributes.Uid = user.Uid
entry.Attributes.Gid = user.Gid
// Update the entry in the filer
_, err = client.UpdateEntry(ctx, &filer_pb.UpdateEntryRequest{
Directory: dir,
Entry: entry,
})
return err
})
if err != nil {
// Log the error but don't fail the whole operation
glog.Errorf("Failed to update file ownership for %s: %v", filepath, err)
}
}
return nil
return NewSeaweedFileReaderAt(fs, entry), nil
}
func (fs *SftpServer) newFileWriter(r *sftp.Request) (io.WriterAt, error) {
return &filerFileWriter{fs: *fs, req: r, permissions: 0644, uid: fs.user.Uid, gid: fs.user.Gid}, nil
dir, _ := util.FullPath(r.Filepath).DirAndName()
if err := fs.checkFilePermission(dir, "write"); err != nil {
glog.Errorf("Permission denied for %s", dir)
return nil, err
}
// Create a temporary file to buffer writes
tmpFile, err := os.CreateTemp("", "sftp-upload-*")
if err != nil {
return nil, fmt.Errorf("failed to create temp file: %v", err)
}
return &SeaweedSftpFileWriter{
fs: *fs,
req: r,
tmpFile: tmpFile,
permissions: 0644,
uid: fs.user.Uid,
gid: fs.user.Gid,
offset: 0,
}, nil
}
func (fs *SftpServer) removeEntry(r *sftp.Request) error {
@ -317,7 +264,7 @@ func (fs *SftpServer) makeDir(r *sftp.Request) error {
return fmt.Errorf("cannot create directory: no user info")
}
dir, name := util.FullPath(r.Filepath).DirAndName()
if err := fs.checkFilePermission(dir, "mkdir"); err != nil {
if err := fs.checkFilePermission(r.Filepath, "mkdir"); err != nil {
return err
}
// default mode and ownership
@ -345,6 +292,81 @@ func (fs *SftpServer) removeDir(r *sftp.Request) error {
return fs.deleteEntry(r.Filepath, false)
}
func (fs *SftpServer) putFile(filepath string, reader io.Reader, user *user.User) error {
dir, filename := util.FullPath(filepath).DirAndName()
uploadUrl := fmt.Sprintf("http://%s%s", fs.filerAddr, filepath)
// Compute MD5 while uploading
hash := md5.New()
body := io.TeeReader(reader, hash)
// We can skip ContentLength if unknown (chunked transfer encoding)
req, err := http.NewRequest(http.MethodPut, uploadUrl, body)
if err != nil {
return fmt.Errorf("create request: %v", err)
}
req.Header.Set("Content-Type", "application/octet-stream")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return fmt.Errorf("upload to filer: %v", err)
}
defer resp.Body.Close()
respBody, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("read response: %v", err)
}
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
return fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(respBody))
}
var result weed_server.FilerPostResult
if err := json.Unmarshal(respBody, &result); err != nil {
return fmt.Errorf("parse response: %v", err)
}
if result.Error != "" {
return fmt.Errorf("filer error: %s", result.Error)
}
// Update file ownership using the same pattern as other functions
if user != nil {
err := fs.callWithClient(false, func(ctx context.Context, client filer_pb.SeaweedFilerClient) error {
// Look up the file to get its current entry
lookupResp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
Name: filename,
})
if err != nil {
return fmt.Errorf("lookup file for attribute update: %v", err)
}
if lookupResp.Entry == nil {
return fmt.Errorf("file not found after upload: %s/%s", dir, filename)
}
// Update the entry with new uid/gid
entry := lookupResp.Entry
entry.Attributes.Uid = user.Uid
entry.Attributes.Gid = user.Gid
// Update the entry in the filer
_, err = client.UpdateEntry(ctx, &filer_pb.UpdateEntryRequest{
Directory: dir,
Entry: entry,
})
return err
})
if err != nil {
// Log the error but don't fail the whole operation
glog.Errorf("Failed to update file ownership for %s: %v", filepath, err)
}
}
return nil
}
// ==================== Common Arguments Helpers ====================
func FileInfoFromEntry(e *filer_pb.Entry) FileInfo {
@ -390,73 +412,6 @@ func (fi *EnhancedFileInfo) Owner() (uid, gid int) {
return int(fi.uid), int(fi.gid)
}
// SeaweedFileReaderAt implements io.ReaderAt for SeaweedFS files
type SeaweedFileReaderAt struct {
fs *SftpServer
entry *filer_pb.Entry
}
func (ra *SeaweedFileReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
// Create a new reader for each ReadAt call
reader := filer.NewFileReader(ra.fs, ra.entry)
if reader == nil {
return 0, fmt.Errorf("failed to create file reader")
}
// Check if we're reading past the end of the file
fileSize := int64(ra.entry.Attributes.FileSize)
if off >= fileSize {
return 0, io.EOF
}
// Seek to the offset
if seeker, ok := reader.(io.Seeker); ok {
_, err = seeker.Seek(off, io.SeekStart)
if err != nil {
return 0, fmt.Errorf("seek error: %v", err)
}
} else {
// If the reader doesn't implement Seek, we need to read and discard bytes
toSkip := off
skipBuf := make([]byte, 8192)
for toSkip > 0 {
skipSize := int64(len(skipBuf))
if skipSize > toSkip {
skipSize = toSkip
}
read, err := reader.Read(skipBuf[:skipSize])
if err != nil {
return 0, fmt.Errorf("skip error: %v", err)
}
if read == 0 {
return 0, fmt.Errorf("unable to skip to offset %d", off)
}
toSkip -= int64(read)
}
}
// Adjust read length if it would go past EOF
readLen := len(p)
remaining := fileSize - off
if int64(readLen) > remaining {
readLen = int(remaining)
if readLen == 0 {
return 0, io.EOF
}
}
// Read the data
n, err = io.ReadFull(reader, p[:readLen])
// Handle EOF correctly
if err == io.ErrUnexpectedEOF || (err == nil && n < len(p)) {
err = io.EOF
}
return n, err
}
func (fs *SftpServer) checkFilePermission(filepath string, permissions string) error {
return fs.authManager.CheckPermission(fs.user, filepath, permissions)
return fs.CheckFilePermission(filepath, permissions)
}

View File

@ -1,11 +1,12 @@
package auth
package sftpd
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/sftpd/user"
)
@ -23,17 +24,6 @@ const (
PermReadWrite = "readwrite"
)
// PermissionChecker handles permission checking for file operations
// It verifies both Unix-style permissions and explicit ACLs defined in user configuration.
type PermissionChecker struct {
fsHelper FileSystemHelper
}
// FileSystemHelper provides necessary filesystem operations for permission checking
type FileSystemHelper interface {
GetEntry(path string) (*Entry, error)
}
// Entry represents a filesystem entry with attributes
type Entry struct {
IsDirectory bool
@ -50,23 +40,7 @@ type EntryAttributes struct {
SymlinkTarget string
}
// PermissionError represents a permission-related error
type PermissionError struct {
Path string
Perm string
User string
}
func (e *PermissionError) Error() string {
return fmt.Sprintf("permission denied: %s required on %s for user %s", e.Perm, e.Path, e.User)
}
// NewPermissionChecker creates a new permission checker
func NewPermissionChecker(fsHelper FileSystemHelper) *PermissionChecker {
return &PermissionChecker{
fsHelper: fsHelper,
}
}
// PermissionError represents a permission-related error
// CheckFilePermission verifies if a user has the required permission on a path
// It first checks if the path is in the user's home directory with explicit permissions.
@ -78,45 +52,53 @@ func NewPermissionChecker(fsHelper FileSystemHelper) *PermissionChecker {
//
// Returns:
// - nil if permission is granted, error otherwise
func (pc *PermissionChecker) CheckFilePermission(user *user.User, path string, perm string) error {
if user == nil {
return &PermissionError{Path: path, Perm: perm, User: "unknown"}
func (fs *SftpServer) CheckFilePermission(path string, perm string) error {
if fs.user == nil {
glog.V(0).Infof("permission denied. No user associated with the SftpServer.")
return os.ErrPermission
}
// Retrieve metadata via helper
entry, err := pc.fsHelper.GetEntry(path)
// Special case for "create" or "write" permissions on non-existent paths
// Check parent directory permissions instead
entry, err := fs.getEntry(path)
if err != nil {
// If the path doesn't exist and we're checking for create/write/mkdir permission,
// check permissions on the parent directory instead
if err == os.ErrNotExist {
parentPath := filepath.Dir(path)
// Check if user can write to the parent directory
return fs.CheckFilePermission(parentPath, perm)
}
return fmt.Errorf("failed to get entry for path %s: %w", path, err)
}
// Rest of the function remains the same...
// Handle symlinks by resolving them
if entry.IsSymlink {
if entry.Attributes.GetSymlinkTarget() != "" {
// Get the actual entry for the resolved path
entry, err = pc.fsHelper.GetEntry(entry.Attributes.SymlinkTarget)
entry, err = fs.getEntry(entry.Attributes.GetSymlinkTarget())
if err != nil {
return fmt.Errorf("failed to get entry for resolved path %s: %w", entry.Attributes.SymlinkTarget, err)
}
// Store the original target
entry.Target = entry.Attributes.SymlinkTarget
}
}
// Special case: root user always has permission
if user.Username == "root" || user.Uid == 0 {
if fs.user.Username == "root" || fs.user.Uid == 0 {
return nil
}
// Check if path is within user's home directory and has explicit permissions
if isPathInHomeDirectory(user, path) {
if isPathInHomeDirectory(fs.user, path) {
// Check if user has explicit permissions for this path
if HasExplicitPermission(user, path, perm, entry.IsDirectory) {
if HasExplicitPermission(fs.user, path, perm, entry.IsDirectory) {
return nil
}
} else {
// For paths outside home directory or without explicit home permissions,
// check UNIX-style perms first
isOwner := user.Uid == entry.Attributes.Uid
isGroup := user.Gid == entry.Attributes.Gid
isOwner := fs.user.Uid == entry.Attributes.Uid
isGroup := fs.user.Gid == entry.Attributes.Gid
mode := os.FileMode(entry.Attributes.FileMode)
if HasUnixPermission(isOwner, isGroup, mode, entry.IsDirectory, perm) {
@ -124,23 +106,12 @@ func (pc *PermissionChecker) CheckFilePermission(user *user.User, path string, p
}
// Then check explicit ACLs
if HasExplicitPermission(user, path, perm, entry.IsDirectory) {
if HasExplicitPermission(fs.user, path, perm, entry.IsDirectory) {
return nil
}
}
return &PermissionError{Path: path, Perm: perm, User: user.Username}
}
// CheckFilePermissionWithContext is a context-aware version of CheckFilePermission
// that supports cancellation and timeouts
func (pc *PermissionChecker) CheckFilePermissionWithContext(ctx context.Context, user *user.User, path string, perm string) error {
// Check for context cancellation
if ctx.Err() != nil {
return ctx.Err()
}
return pc.CheckFilePermission(user, path, perm)
glog.V(0).Infof("permission denied for user %s on path %s for permission %s", fs.user.Username, path, perm)
return os.ErrPermission
}
// isPathInHomeDirectory checks if a path is in the user's home directory

View File

@ -2,12 +2,18 @@
package sftpd
import (
"context"
"fmt"
"io"
"os"
"time"
"github.com/pkg/sftp"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/sftpd/auth"
filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/sftpd/user"
"github.com/seaweedfs/seaweedfs/weed/util"
"google.golang.org/grpc"
)
@ -17,16 +23,10 @@ type SftpServer struct {
dataCenter string
filerGroup string
user *user.User
authManager *auth.Manager
}
// NewSftpServer constructs the server.
func NewSftpServer(filerAddr pb.ServerAddress, grpcDialOption grpc.DialOption, dataCenter, filerGroup string, user *user.User) SftpServer {
// Create a file system helper for the auth manager
fsHelper := NewFileSystemHelper(filerAddr, grpcDialOption, dataCenter, filerGroup)
// Create an auth manager for permission checking
authManager := auth.NewManager(nil, fsHelper, []string{})
return SftpServer{
filerAddr: filerAddr,
@ -34,7 +34,6 @@ func NewSftpServer(filerAddr pb.ServerAddress, grpcDialOption grpc.DialOption, d
dataCenter: dataCenter,
filerGroup: filerGroup,
user: user,
authManager: authManager,
}
}
@ -57,3 +56,51 @@ func (fs *SftpServer) Filecmd(req *sftp.Request) error {
func (fs *SftpServer) Filelist(req *sftp.Request) (sftp.ListerAt, error) {
return fs.listDir(req)
}
// EnsureHomeDirectory creates the user's home directory if it doesn't exist
func (fs *SftpServer) EnsureHomeDirectory() error {
if fs.user.HomeDir == "" {
return fmt.Errorf("user has no home directory configured")
}
glog.V(0).Infof("Ensuring home directory exists for user %s: %s", fs.user.Username, fs.user.HomeDir)
// Check if home directory already exists
entry, err := fs.getEntry(fs.user.HomeDir)
if err == nil && entry != nil {
// Directory exists, just ensure proper ownership
if entry.Attributes.Uid != fs.user.Uid || entry.Attributes.Gid != fs.user.Gid {
dir, _ := util.FullPath(fs.user.HomeDir).DirAndName()
entry.Attributes.Uid = fs.user.Uid
entry.Attributes.Gid = fs.user.Gid
return fs.updateEntry(dir, entry)
}
return nil
}
// Skip the permission check for home directory creation:
// this is a special case where the directory should be created regardless.
dir, name := util.FullPath(fs.user.HomeDir).DirAndName()
// Create the directory with proper permissions using filer_pb.Mkdir
err = filer_pb.Mkdir(context.Background(), fs, dir, name, func(entry *filer_pb.Entry) {
mode := uint32(0700 | os.ModeDir) // Default to private permissions for home dirs
entry.Attributes.FileMode = mode
entry.Attributes.Uid = fs.user.Uid
entry.Attributes.Gid = fs.user.Gid
now := time.Now().Unix()
entry.Attributes.Crtime = now
entry.Attributes.Mtime = now
if entry.Extended == nil {
entry.Extended = make(map[string][]byte)
}
entry.Extended["creator"] = []byte(fs.user.Username)
})
if err != nil {
return fmt.Errorf("failed to create home directory: %v", err)
}
glog.V(0).Infof("Successfully created home directory for user %s: %s", fs.user.Username, fs.user.HomeDir)
return nil
}
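A short usage sketch of the flow above; the filer address, dial option, and user values are illustrative placeholders, not taken from this commit. Note that EnsureHomeDirectory deliberately bypasses the permission check and also repairs ownership on an existing directory.
u := &user.User{Username: "alice", HomeDir: "/sftp/alice", Uid: 1001, Gid: 1001}
fs := NewSftpServer(pb.ServerAddress("localhost:8888"), grpc.WithInsecure(), "", "", u)
if err := fs.EnsureHomeDirectory(); err != nil {
	// The session can still proceed; the user may have access to other paths.
	glog.Errorf("failed to prepare home directory for %s: %v", u.Username, err)
}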

View File

@ -5,7 +5,6 @@ import (
"context"
"fmt"
"io"
"log"
"net"
"os"
"path/filepath"
@ -14,10 +13,8 @@ import (
"github.com/pkg/sftp"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/sftpd/auth"
"github.com/seaweedfs/seaweedfs/weed/sftpd/user"
"github.com/seaweedfs/seaweedfs/weed/util"
"golang.org/x/crypto/ssh"
"google.golang.org/grpc"
)
@ -27,7 +24,6 @@ type SFTPService struct {
options SFTPServiceOptions
userStore user.Store
authManager *auth.Manager
homeManager *user.HomeManager
}
// SFTPServiceOptions contains all configuration options for the SFTP service.
@ -64,100 +60,12 @@ func NewSFTPService(options *SFTPServiceOptions) *SFTPService {
}
service.userStore = userStore
// Initialize file system helper for permission checking
fsHelper := NewFileSystemHelper(
options.Filer,
options.GrpcDialOption,
options.DataCenter,
options.FilerGroup,
)
// Initialize auth manager
service.authManager = auth.NewManager(userStore, fsHelper, options.AuthMethods)
// Initialize home directory manager
service.homeManager = user.NewHomeManager(fsHelper)
service.authManager = auth.NewManager(userStore, options.AuthMethods)
return &service
}
// FileSystemHelper implements auth.FileSystemHelper interface
type FileSystemHelper struct {
filerAddr pb.ServerAddress
grpcDialOption grpc.DialOption
dataCenter string
filerGroup string
}
func NewFileSystemHelper(filerAddr pb.ServerAddress, grpcDialOption grpc.DialOption, dataCenter, filerGroup string) *FileSystemHelper {
return &FileSystemHelper{
filerAddr: filerAddr,
grpcDialOption: grpcDialOption,
dataCenter: dataCenter,
filerGroup: filerGroup,
}
}
// GetEntry implements auth.FileSystemHelper interface
func (fs *FileSystemHelper) GetEntry(path string) (*auth.Entry, error) {
dir, name := util.FullPath(path).DirAndName()
var entry *filer_pb.Entry
err := fs.withTimeoutContext(func(ctx context.Context) error {
return fs.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
Name: name,
})
if err != nil {
return err
}
if resp.Entry == nil {
return fmt.Errorf("entry not found")
}
entry = resp.Entry
return nil
})
})
if err != nil {
return nil, err
}
return &auth.Entry{
IsDirectory: entry.IsDirectory,
Attributes: &auth.EntryAttributes{
Uid: entry.Attributes.GetUid(),
Gid: entry.Attributes.GetGid(),
FileMode: entry.Attributes.GetFileMode(),
SymlinkTarget: entry.Attributes.GetSymlinkTarget(),
},
IsSymlink: entry.Attributes.GetSymlinkTarget() != "",
}, nil
}
// Implement FilerClient interface for FileSystemHelper
func (fs *FileSystemHelper) AdjustedUrl(location *filer_pb.Location) string {
return location.Url
}
func (fs *FileSystemHelper) GetDataCenter() string {
return fs.dataCenter
}
func (fs *FileSystemHelper) WithFilerClient(streamingMode bool, fn func(filer_pb.SeaweedFilerClient) error) error {
addr := fs.filerAddr.ToGrpcAddress()
return pb.WithGrpcClient(streamingMode, util.RandomInt32(), func(conn *grpc.ClientConn) error {
return fn(filer_pb.NewSeaweedFilerClient(conn))
}, addr, false, fs.grpcDialOption)
}
func (fs *FileSystemHelper) withTimeoutContext(fn func(ctx context.Context) error) error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
return fn(ctx)
}
// Serve accepts incoming connections on the provided listener and handles them.
func (s *SFTPService) Serve(listener net.Listener) error {
// Build SSH server config
@ -212,14 +120,14 @@ func (s *SFTPService) buildSSHConfig() (*ssh.ServerConfig, error) {
keyPath := filepath.Join(s.options.HostKeysFolder, file.Name())
if err := s.addHostKey(config, keyPath); err != nil {
// Log the error but continue with other keys
log.Printf("Warning: failed to add host key %s: %v", keyPath, err)
glog.V(0).Info(fmt.Sprintf("Failed to add host key %s: %v", keyPath, err))
continue
}
hostKeysAdded++
}
if hostKeysAdded == 0 {
log.Printf("Warning: no valid host keys found in folder %s", s.options.HostKeysFolder)
glog.V(0).Info(fmt.Sprintf("Warning: no valid host keys found in folder %s", s.options.HostKeysFolder))
}
}
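The addHostKey helper itself is outside this excerpt. Assuming it follows the usual x/crypto/ssh pattern, it would look roughly like the sketch below; this is not the committed implementation.
func addHostKey(config *ssh.ServerConfig, keyPath string) error {
	pemBytes, err := os.ReadFile(keyPath)
	if err != nil {
		return fmt.Errorf("read host key %s: %v", keyPath, err)
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	if err != nil {
		return fmt.Errorf("parse host key %s: %v", keyPath, err)
	}
	config.AddHostKey(signer)
	return nil
}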
@ -296,7 +204,7 @@ func (s *SFTPService) handleSSHConnection(conn net.Conn, config *ssh.ServerConfi
)
// Ensure home directory exists with proper permissions
if err := s.homeManager.EnsureHomeDirectory(sftpUser); err != nil {
if err := userFs.EnsureHomeDirectory(); err != nil {
glog.Errorf("Failed to ensure home directory for user %s: %v", username, err)
// We don't close the connection here, as the user might still be able to access other directories
}

View File

@ -1,143 +0,0 @@
package sftpd
import (
"crypto/subtle"
"encoding/json"
"fmt"
"os"
"strings"
"sync"
)
// UserStore interface for user management.
type UserStore interface {
GetUser(username string) (*User, error)
ValidatePassword(username string, password []byte) bool
ValidatePublicKey(username string, keyData string) bool
GetUserPermissions(username string, path string) []string
}
// User represents an SFTP user with authentication and permission details.
type User struct {
Username string
Password string // Plaintext password
PublicKeys []string // Authorized public keys
HomeDir string // User's home directory
Permissions map[string][]string // path -> permissions (read, write, list, etc.)
Uid uint32 // User ID for file ownership
Gid uint32 // Group ID for file ownership
}
// FileUserStore implements UserStore using a JSON file.
type FileUserStore struct {
filePath string
users map[string]*User
mu sync.RWMutex
}
// NewFileUserStore creates a new user store from a JSON file.
func NewFileUserStore(filePath string) (*FileUserStore, error) {
store := &FileUserStore{
filePath: filePath,
users: make(map[string]*User),
}
if err := store.loadUsers(); err != nil {
return nil, err
}
return store, nil
}
// loadUsers loads users from the JSON file.
func (s *FileUserStore) loadUsers() error {
s.mu.Lock()
defer s.mu.Unlock()
// Check if file exists
if _, err := os.Stat(s.filePath); os.IsNotExist(err) {
return fmt.Errorf("user store file not found: %s", s.filePath)
}
data, err := os.ReadFile(s.filePath)
if err != nil {
return fmt.Errorf("failed to read user store file: %v", err)
}
var users []*User
if err := json.Unmarshal(data, &users); err != nil {
return fmt.Errorf("failed to parse user store file: %v", err)
}
for _, user := range users {
s.users[user.Username] = user
}
return nil
}
// GetUser returns a user by username.
func (s *FileUserStore) GetUser(username string) (*User, error) {
s.mu.RLock()
defer s.mu.RUnlock()
user, ok := s.users[username]
if !ok {
return nil, fmt.Errorf("user not found: %s", username)
}
return user, nil
}
// ValidatePassword checks if the password is valid for the user.
func (s *FileUserStore) ValidatePassword(username string, password []byte) bool {
user, err := s.GetUser(username)
if err != nil {
return false
}
// Compare plaintext password using constant time comparison for security
return subtle.ConstantTimeCompare([]byte(user.Password), password) == 1
}
// ValidatePublicKey checks if the public key is valid for the user.
func (s *FileUserStore) ValidatePublicKey(username string, keyData string) bool {
user, err := s.GetUser(username)
if err != nil {
return false
}
for _, key := range user.PublicKeys {
if subtle.ConstantTimeCompare([]byte(key), []byte(keyData)) == 1 {
return true
}
}
return false
}
// GetUserPermissions returns the permissions for a user on a path.
func (s *FileUserStore) GetUserPermissions(username string, path string) []string {
user, err := s.GetUser(username)
if err != nil {
return nil
}
// Check exact path match first
if perms, ok := user.Permissions[path]; ok {
return perms
}
// Check parent directories
var bestMatch string
var bestPerms []string
for p, perms := range user.Permissions {
if strings.HasPrefix(path, p) && len(p) > len(bestMatch) {
bestMatch = p
bestPerms = perms
}
}
return bestPerms
}

View File

@ -17,6 +17,39 @@ type FileStore struct {
mu sync.RWMutex
}
// Store defines the interface for user storage and retrieval
type Store interface {
// GetUser retrieves a user by username
GetUser(username string) (*User, error)
// ValidatePassword checks if the password is valid for the user
ValidatePassword(username string, password []byte) bool
// ValidatePublicKey checks if the public key is valid for the user
ValidatePublicKey(username string, keyData string) bool
// GetUserPermissions returns the permissions for a user on a path
GetUserPermissions(username string, path string) []string
// SaveUser saves or updates a user
SaveUser(user *User) error
// DeleteUser removes a user
DeleteUser(username string) error
// ListUsers returns all usernames
ListUsers() ([]string, error)
}
// UserNotFoundError is returned when a user is not found
type UserNotFoundError struct {
Username string
}
func (e *UserNotFoundError) Error() string {
return fmt.Sprintf("user not found: %s", e.Username)
}
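Assuming the store's lookup path returns *UserNotFoundError when a user is missing (that path is not shown in this hunk), callers can separate an unknown user from a storage failure with errors.As; a brief sketch, where store is a hypothetical Store value:
var notFound *UserNotFoundError
if _, err := store.GetUser("alice"); errors.As(err, &notFound) {
	// Unknown user: reject the login instead of surfacing a storage error.
}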
// NewFileStore creates a new user store from a JSON file
func NewFileStore(filePath string) (*FileStore, error) {
store := &FileStore{
@ -128,7 +161,7 @@ func (s *FileStore) ValidatePublicKey(username string, keyData string) bool {
}
for _, key := range user.PublicKeys {
if key == keyData {
if subtle.ConstantTimeCompare([]byte(key), []byte(keyData)) == 1 {
return true
}
}

View File

@ -1,204 +0,0 @@
package user
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
filer_pb "github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
// HomeManager handles user home directory operations
type HomeManager struct {
filerClient FilerClient
}
// FilerClient defines the interface for interacting with the filer
type FilerClient interface {
WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error
GetDataCenter() string
AdjustedUrl(location *filer_pb.Location) string
}
// NewHomeManager creates a new home directory manager
func NewHomeManager(filerClient FilerClient) *HomeManager {
return &HomeManager{
filerClient: filerClient,
}
}
// EnsureHomeDirectory creates the user's home directory if it doesn't exist
func (hm *HomeManager) EnsureHomeDirectory(user *User) error {
if user.HomeDir == "" {
return fmt.Errorf("user has no home directory configured")
}
glog.V(0).Infof("Ensuring home directory exists for user %s: %s", user.Username, user.HomeDir)
// Check if home directory exists and create it if needed
err := hm.createDirectoryIfNotExists(user.HomeDir, user)
if err != nil {
return fmt.Errorf("failed to ensure home directory: %v", err)
}
// Update user permissions map to include the home directory with full access if not already present
if user.Permissions == nil {
user.Permissions = make(map[string][]string)
}
// Only add permissions if not already present
if _, exists := user.Permissions[user.HomeDir]; !exists {
user.Permissions[user.HomeDir] = []string{"all"}
glog.V(0).Infof("Added full permissions for user %s to home directory %s",
user.Username, user.HomeDir)
}
return nil
}
// createDirectoryIfNotExists creates a directory path if it doesn't exist
func (hm *HomeManager) createDirectoryIfNotExists(dirPath string, user *User) error {
// Split the path into components
components := strings.Split(strings.Trim(dirPath, "/"), "/")
currentPath := "/"
for _, component := range components {
if component == "" {
continue
}
nextPath := filepath.Join(currentPath, component)
err := hm.createSingleDirectory(nextPath, user)
if err != nil {
return err
}
currentPath = nextPath
}
return nil
}
// createSingleDirectory creates a single directory if it doesn't exist
func (hm *HomeManager) createSingleDirectory(dirPath string, user *User) error {
var dirExists bool
err := hm.filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
dir, name := util.FullPath(dirPath).DirAndName()
// Check if directory exists
resp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
Name: name,
})
if err != nil || resp.Entry == nil {
// Directory doesn't exist, create it
glog.V(0).Infof("Creating directory %s for user %s", dirPath, user.Username)
err = filer_pb.Mkdir(context.Background(), hm, string(dir), name, func(entry *filer_pb.Entry) {
// Set appropriate permissions
entry.Attributes.FileMode = uint32(0700 | os.ModeDir) // rwx------ for user
entry.Attributes.Uid = user.Uid
entry.Attributes.Gid = user.Gid
// Set creation and modification times
now := time.Now().Unix()
entry.Attributes.Crtime = now
entry.Attributes.Mtime = now
// Add extended attributes
if entry.Extended == nil {
entry.Extended = make(map[string][]byte)
}
entry.Extended["creator"] = []byte(user.Username)
entry.Extended["auto_created"] = []byte("true")
})
if err != nil {
return fmt.Errorf("failed to create directory %s: %v", dirPath, err)
}
} else if !resp.Entry.IsDirectory {
return fmt.Errorf("path %s exists but is not a directory", dirPath)
} else {
dirExists = true
// Update ownership if needed
if resp.Entry.Attributes.Uid != user.Uid || resp.Entry.Attributes.Gid != user.Gid {
glog.V(0).Infof("Updating ownership of directory %s for user %s", dirPath, user.Username)
entry := resp.Entry
entry.Attributes.Uid = user.Uid
entry.Attributes.Gid = user.Gid
_, updateErr := client.UpdateEntry(ctx, &filer_pb.UpdateEntryRequest{
Directory: dir,
Entry: entry,
})
if updateErr != nil {
glog.Warningf("Failed to update directory ownership: %v", updateErr)
}
}
}
return nil
})
if err != nil {
return err
}
if !dirExists {
// Verify the directory was created
verifyErr := hm.filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
dir, name := util.FullPath(dirPath).DirAndName()
resp, err := client.LookupDirectoryEntry(ctx, &filer_pb.LookupDirectoryEntryRequest{
Directory: dir,
Name: name,
})
if err != nil || resp.Entry == nil {
return fmt.Errorf("directory not found after creation")
}
if !resp.Entry.IsDirectory {
return fmt.Errorf("path exists but is not a directory")
}
dirExists = true
return nil
})
if verifyErr != nil {
return fmt.Errorf("failed to verify directory creation: %v", verifyErr)
}
}
return nil
}
// Implement necessary methods to satisfy the filer_pb.FilerClient interface
func (hm *HomeManager) AdjustedUrl(location *filer_pb.Location) string {
return hm.filerClient.AdjustedUrl(location)
}
func (hm *HomeManager) GetDataCenter() string {
return hm.filerClient.GetDataCenter()
}
// WithFilerClient delegates to the underlying filer client
func (hm *HomeManager) WithFilerClient(streamingMode bool, fn func(client filer_pb.SeaweedFilerClient) error) error {
return hm.filerClient.WithFilerClient(streamingMode, fn)
}

View File

@ -2,7 +2,6 @@
package user
import (
"fmt"
"math/rand"
"path/filepath"
)
@ -18,39 +17,6 @@ type User struct {
Gid uint32 // Group ID for file ownership
}
// Store defines the interface for user storage and retrieval
type Store interface {
// GetUser retrieves a user by username
GetUser(username string) (*User, error)
// ValidatePassword checks if the password is valid for the user
ValidatePassword(username string, password []byte) bool
// ValidatePublicKey checks if the public key is valid for the user
ValidatePublicKey(username string, keyData string) bool
// GetUserPermissions returns the permissions for a user on a path
GetUserPermissions(username string, path string) []string
// SaveUser saves or updates a user
SaveUser(user *User) error
// DeleteUser removes a user
DeleteUser(username string) error
// ListUsers returns all usernames
ListUsers() ([]string, error)
}
// UserNotFoundError is returned when a user is not found
type UserNotFoundError struct {
Username string
}
func (e *UserNotFoundError) Error() string {
return fmt.Sprintf("user not found: %s", e.Username)
}
// NewUser creates a new user with default settings
func NewUser(username string) *User {
// Generate a random UID/GID between 1000 and 60000

View File

@ -0,0 +1,52 @@
package utils
import (
"container/list"
)
type CacheEntry struct {
key int64
value []byte
}
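// LruCache is a fixed-capacity least-recently-used cache from int64 keys to
// byte slices. It is not safe for concurrent use without external locking.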
type LruCache struct {
capacity int
ll *list.List
cache map[int64]*list.Element
}
func NewLRUCache(capacity int) *LruCache {
return &LruCache{
capacity: capacity,
ll: list.New(),
cache: make(map[int64]*list.Element),
}
}
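// Get returns the cached value for key and marks the entry as most recently used.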
func (c *LruCache) Get(key int64) ([]byte, bool) {
if ele, ok := c.cache[key]; ok {
c.ll.MoveToFront(ele)
return ele.Value.(*CacheEntry).value, true
}
return nil, false
}
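// Put inserts or updates the value for key, evicting the least recently used
// entry once the cache is at capacity.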
func (c *LruCache) Put(key int64, value []byte) {
if ele, ok := c.cache[key]; ok {
c.ll.MoveToFront(ele)
ele.Value.(*CacheEntry).value = value
return
}
if c.ll.Len() >= c.capacity {
oldest := c.ll.Back()
if oldest != nil {
c.ll.Remove(oldest)
delete(c.cache, oldest.Value.(*CacheEntry).key)
}
}
entry := &CacheEntry{key, value}
ele := c.ll.PushFront(entry)
c.cache[key] = ele
}
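A usage sketch for the cache; the package-level variable, the fetchChunk loader, and the offset-keyed layout are placeholders to show the intended read-through pattern, not code from this commit.
var chunkCache = NewLRUCache(64) // keep up to 64 recently used chunks in memory

func readCached(offset int64) ([]byte, error) {
	if data, ok := chunkCache.Get(offset); ok {
		return data, nil // hit: no round trip to the filer
	}
	data, err := fetchChunk(offset) // fetchChunk is a hypothetical loader
	if err != nil {
		return nil, err
	}
	chunkCache.Put(offset, data)
	return data, nil
}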