Update tikv client version and add 1PC support

yulai.li
2022-06-26 22:43:37 +08:00
677 changed files with 41666 additions and 18455 deletions

filer.toml

@@ -44,7 +44,7 @@ dbFile = "./filer.db" # sqlite db file
# CREATE TABLE IF NOT EXISTS filemeta (
# dirhash BIGINT COMMENT 'first 64 bits of MD5 hash value of directory field',
# name VARCHAR(1000) BINARY COMMENT 'directory or file name',
-# directory TEXT COMMENT 'full path to parent directory',
+# directory TEXT BINARY COMMENT 'full path to parent directory',
# meta LONGBLOB,
# PRIMARY KEY (dirhash, name)
# ) DEFAULT CHARSET=utf8;
@@ -61,15 +61,15 @@ connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or adjust the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[mysql2] # or memsql, tidb
enabled = false
createTable = """
-CREATE TABLE IF NOT EXISTS ` + "`%s`" + ` (
+CREATE TABLE IF NOT EXISTS `%s` (
dirhash BIGINT,
name VARCHAR(1000) BINARY,
-directory TEXT,
+directory TEXT BINARY,
meta LONGBLOB,
PRIMARY KEY (dirhash, name)
) DEFAULT CHARSET=utf8;
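
The change above drops the Go string-concatenation form of the CREATE TABLE and upsertQuery templates in favor of a plain %s placeholder. A minimal sketch of how such a template can be applied, assuming the store fills in the table name with fmt.Sprintf and binds the values through database/sql (the function name and driver choice are illustrative, not SeaweedFS's actual code):

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql" // assumed driver; any MySQL-compatible driver works
)

// upsertEntry substitutes the table name into the %s placeholder, then lets
// the driver bind the four value parameters.
func upsertEntry(db *sql.DB, table string, dirhash int64, name, directory string, meta []byte) error {
	const tmpl = "INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"
	_, err := db.Exec(fmt.Sprintf(tmpl, table), dirhash, name, directory, meta)
	return err
}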
@@ -85,7 +85,7 @@ connection_max_lifetime_seconds = 0
interpolateParams = false
# if insert/upsert is failing, you can disable upsert or adjust the query syntax to match your RDBMS:
enableUpsert = true
upsertQuery = """INSERT INTO ` + "`%s`" + ` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
upsertQuery = """INSERT INTO `%s` (dirhash,name,directory,meta) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE meta = VALUES(meta)"""
[postgres] # or cockroachdb, YugabyteDB
# CREATE TABLE IF NOT EXISTS filemeta (
@@ -153,6 +153,8 @@ password = ""
superLargeDirectories = []
# Name of the datacenter local to this filer, used as host selection fallback.
localDC = ""
+# Gocql connection timeout, default: 600ms
+connection_timeout_millisecond = 600
[hbase]
enabled = false
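
The new connection_timeout_millisecond option corresponds to the gocql driver's cluster-level connect timeout. A minimal sketch, assuming the gocql package; the contact point and keyspace name are illustrative:

package main

import (
	"time"

	"github.com/gocql/gocql"
)

func newCassandraSession() (*gocql.Session, error) {
	cluster := gocql.NewCluster("localhost")
	cluster.Keyspace = "seaweedfs" // illustrative keyspace name
	// maps to connection_timeout_millisecond = 600
	cluster.ConnectTimeout = 600 * time.Millisecond
	return cluster.CreateSession()
}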
@@ -167,6 +169,14 @@ database = 0
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
+[redis2_sentinel]
+enabled = false
+addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
+masterName = "master"
+username = ""
+password = ""
+database = 0
[redis_cluster2]
enabled = false
addresses = [
@@ -185,6 +195,70 @@ routeByLatency = false
# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
superLargeDirectories = []
+[redis_lua]
+enabled = false
+address = "localhost:6379"
+password = ""
+database = 0
+# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
+superLargeDirectories = []
+[redis_lua_sentinel]
+enabled = false
+addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
+masterName = "master"
+username = ""
+password = ""
+database = 0
+[redis_lua_cluster]
+enabled = false
+addresses = [
+"localhost:30001",
+"localhost:30002",
+"localhost:30003",
+"localhost:30004",
+"localhost:30005",
+"localhost:30006",
+]
+password = ""
+# allows reads from slave servers or the master, but all writes still go to the master
+readOnly = false
+# automatically use the closest Redis server for reads
+routeByLatency = false
+# This changes the data layout. Only add new directories. Removing/Updating will cause data loss.
+superLargeDirectories = []
+[redis3] # beta
+enabled = false
+address = "localhost:6379"
+password = ""
+database = 0
+[redis3_sentinel]
+enabled = false
+addresses = ["172.22.12.7:26379","172.22.12.8:26379","172.22.12.9:26379"]
+masterName = "master"
+username = ""
+password = ""
+database = 0
+[redis_cluster3] # beta
+enabled = false
+addresses = [
+"localhost:30001",
+"localhost:30002",
+"localhost:30003",
+"localhost:30004",
+"localhost:30005",
+"localhost:30006",
+]
+password = ""
+# allows reads from slave servers or the master, but all writes still go to the master
+readOnly = false
+# automatically use the closest Redis server for reads
+routeByLatency = false
[etcd]
enabled = false
servers = "localhost:2379"
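
Each of the new *_sentinel sections carries the same five fields, which map directly onto a Sentinel-aware Redis client. A minimal sketch, assuming the go-redis library (the v8 import path and the exact version used by the filer are assumptions):

package main

import "github.com/go-redis/redis/v8"

// newSentinelClient mirrors the sentinel fields above:
// addresses -> SentinelAddrs, masterName -> MasterName, and so on.
func newSentinelClient() *redis.Client {
	return redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "master",
		SentinelAddrs: []string{"172.22.12.7:26379", "172.22.12.8:26379", "172.22.12.9:26379"},
		Username:      "",
		Password:      "",
		DB:            0,
	})
}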
@@ -211,6 +285,29 @@ healthcheck_enabled = false
index.max_result_window = 10000
+[arangodb] # in development, don't use it
+enabled = false
+db_name = "seaweedfs"
+servers=["http://localhost:8529"] # list of servers to connect to
+# only basic auth is supported for now
+username=""
+password=""
+# skip tls cert validation
+insecure_skip_verify = true
+[ydb] # https://ydb.tech/
+enabled = false
+dsn = "grpc://localhost:2136?database=/local"
+prefix = "seaweedfs"
+useBucketPrefix = true # Fast Bucket Deletion
+poolSizeLimit = 50
+dialTimeOut = 10
+# Authentication is configured with one of the following environment variables:
+# YDB_SERVICE_ACCOUNT_KEY_FILE_CREDENTIALS=<path/to/sa_key_file>: authenticate with a service account key file at the given path
+# YDB_ANONYMOUS_CREDENTIALS="1": authenticate with anonymous access (useful for connecting to a test YDB installation)
+# YDB_METADATA_CREDENTIALS="1": authenticate via the metadata service, from a Yandex Cloud virtual machine or a Yandex Cloud Function
+# YDB_ACCESS_TOKEN_CREDENTIALS=<access_token>: authenticate with a short-lived access token (for example, an IAM token)
##########################
##########################
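
The dsn string and the YDB_* variables line up with how the ydb-go-sdk opens a connection. A minimal sketch, assuming ydb-go-sdk/v3 plus the ydb-go-sdk-auth-environ helper that reads those environment variables (both imports are assumptions about the client stack):

package main

import (
	"context"

	environ "github.com/ydb-platform/ydb-go-sdk-auth-environ"
	ydb "github.com/ydb-platform/ydb-go-sdk/v3"
)

func openYDB(ctx context.Context) error {
	// credentials are resolved from the YDB_* environment variables listed above
	db, err := ydb.Open(ctx, "grpc://localhost:2136?database=/local",
		environ.WithEnvironCredentials(ctx))
	if err != nil {
		return err
	}
	defer db.Close(ctx)
	return nil
}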
@@ -238,3 +335,5 @@ enabled = false
pdaddrs = "localhost:2379"
# Concurrency for TiKV delete range
deleterange_concurrency = 1
+# Enable 1PC
+enable_1pc = false
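
enable_1pc opts single-region transactions into TiKV's one-phase commit, skipping the prewrite/commit round trip of standard two-phase commit. A minimal sketch of applying the flag per transaction, assuming tikv/client-go/v2 (the helper function is illustrative):

package main

import (
	"context"

	"github.com/tikv/client-go/v2/txnkv"
)

func putWith1PC(ctx context.Context, pdAddrs []string, key, value []byte) error {
	client, err := txnkv.NewClient(pdAddrs)
	if err != nil {
		return err
	}
	defer client.Close()

	txn, err := client.Begin()
	if err != nil {
		return err
	}
	// corresponds to enable_1pc = true in the config above
	txn.SetEnable1PC(true)
	if err := txn.Set(key, value); err != nil {
		return err
	}
	return txn.Commit(ctx)
}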

master.toml

@@ -14,19 +14,14 @@ scripts = """
volume.deleteEmpty -quietFor=24h -force
volume.balance -force
volume.fix.replication
+s3.clean.uploads -timeAgo=24h
unlock
"""
sleep_minutes = 17 # sleep minutes between each script execution
[master.filer]
default = "localhost:8888" # used by maintenance scripts if the scripts need to use fs-related commands
[master.sequencer]
type = "raft" # Choose [raft|etcd|snowflake] type for storing the file id sequence
# when sequencer.type = etcd, set listen client urls of etcd cluster that store file id sequence
# example : http://127.0.0.1:2379,http://127.0.0.1:2389
sequencer_etcd_urls = "http://127.0.0.1:2379"
type = "raft" # Choose [raft|snowflake] type for storing the file id sequence
# when sequencer.type = snowflake, the snowflake id must be different from other masters
sequencer_snowflake_id = 0 # any number between 1 and 1023
@@ -41,6 +36,7 @@ aws_secret_access_key = "" # if empty, loads from the shared credentials fil
region = "us-east-2"
bucket = "your_bucket_name" # an existing bucket
endpoint = ""
+storage_class = "STANDARD_IA"
# create this number of logical volumes if no more writable volumes
# count_x means how many copies of data.

security.toml

@@ -4,17 +4,46 @@
# /etc/seaweedfs/security.toml
# this file is read by master, volume server, and filer
-# the jwt signing key is read by master and volume server.
-# a jwt defaults to expire after 10 seconds.
+# this jwt signing key is read by master and volume server, and it is used for write operations:
+# - the Master server generates the JWT, which can be used to write a certain file on a volume server
+# - the Volume server validates the JWT on writing
+# the jwt defaults to expire after 10 seconds.
[jwt.signing]
key = ""
expires_after_seconds = 10 # seconds
-# jwt for read is only supported with master+volume setup. Filer does not support this mode.
+# by default, if the signing key above is set, the Volume UI over HTTP is disabled.
+# by setting ui.access to true, you can re-enable the Volume UI. Despite
+# some information leakage (as the UI is not authenticated), this should not
+# pose a security risk.
+[access]
+ui = false
+# this jwt signing key is read by master and volume server, and it is used for read operations:
+# - the Master server generates the JWT, which can be used to read a certain file on a volume server
+# - the Volume server validates the JWT on reading
+# NOTE: jwt for read is only supported with master+volume setup. Filer does not support this mode.
[jwt.signing.read]
key = ""
expires_after_seconds = 10 # seconds
+# If this JWT key is configured, Filer only accepts writes over HTTP if they are signed with this JWT:
+# - e.g. the S3 API Shim generates the JWT
+# - the Filer server validates the JWT on writing
+# the jwt defaults to expire after 10 seconds.
+[jwt.filer_signing]
+key = ""
+expires_after_seconds = 10 # seconds
+# If this JWT key is configured, Filer only accepts reads over HTTP if they are signed with this JWT:
+# - e.g. the S3 API Shim generates the JWT
+# - the Filer server validates the JWT on reading
+# the jwt defaults to expire after 10 seconds.
+[jwt.filer_signing.read]
+key = ""
+expires_after_seconds = 10 # seconds
# all grpc tls authentications are mutual
# the values for the following ca, cert, and key are paths to the PEM files.
# the host name is not checked, so the PEM files can be shared.
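
All four [jwt.*] sections follow the same pattern: one side signs a short-lived token with the shared key, the other side verifies it before serving the request. A minimal sketch with the golang-jwt library; the claim name and HS256 choice are assumptions, not SeaweedFS's actual token layout:

package main

import (
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// signToken issues a token that expires after expires_after_seconds = 10.
func signToken(key []byte, fileId string) (string, error) {
	claims := jwt.MapClaims{
		"exp": time.Now().Add(10 * time.Second).Unix(),
		"fid": fileId, // illustrative claim name
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(key)
}

// verifyToken checks the signature and the expiry before a request is served.
func verifyToken(key []byte, tokenString string) error {
	_, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	return err
}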
@@ -38,6 +67,11 @@ cert = ""
key = ""
allowed_commonNames = "" # comma-separated SSL certificate common names
+[grpc.s3]
+cert = ""
+key = ""
+allowed_commonNames = "" # comma-separated SSL certificate common names
[grpc.msg_broker]
cert = ""
key = ""
@@ -54,7 +88,13 @@ key = ""
# this does not work with other clients, e.g., "weed filer|mount" etc, yet.
[https.client]
enabled = true
[https.volume]
cert = ""
key = ""
ca = ""
+[https.master]
+cert = ""
+key = ""
+ca = ""
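
The [grpc.*] sections above describe mutual TLS: each component presents a certificate signed by the shared ca, and peers verify each other. A minimal sketch of building such credentials with the Go standard library and grpc-go (the helper function is illustrative):

package main

import (
	"crypto/tls"
	"crypto/x509"
	"os"

	"google.golang.org/grpc/credentials"
)

// mutualTLSCreds loads the cert/key pair and the shared CA from a [grpc.*]
// section and requires the peer to present a certificate signed by that CA.
func mutualTLSCreds(certFile, keyFile, caFile string) (credentials.TransportCredentials, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	caPEM, err := os.ReadFile(caFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caPEM)
	return credentials.NewTLS(&tls.Config{
		Certificates: []tls.Certificate{cert},
		ClientCAs:    pool,
		ClientAuth:   tls.RequireAndVerifyClientCert,
	}), nil
}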