Mirror of https://github.com/seaweedfs/seaweedfs.git (synced 2025-09-20 07:27:57 +08:00).
Commit: "Tarantool filer store (#6669)"
Co-authored-by: Marat Karimov <m.karimov@digitalms.ru>
This commit is contained in:
14
docker/tarantool/app-scm-1.rockspec
Normal file
14
docker/tarantool/app-scm-1.rockspec
Normal file
@@ -0,0 +1,14 @@
|
||||
-- Rockspec for the Tarantool application bundle of the SeaweedFS
-- Tarantool filer store. The sources live in the local tree, hence the
-- /dev/null source URL and the 'none' build type: installing this rock
-- only pulls in the pinned dependencies below.
package = 'app'
version = 'scm-1'

source = {
    url = '/dev/null',
}

-- Runtime rocks required by the storage/router roles (versions pinned).
dependencies = {
    'crud == 1.5.2-1',
    'expirationd == 1.6.0-1',
    'metrics-export-role == 0.3.0-1',
    'vshard == 0.1.32-1',
}

build = {
    type = 'none',
}
|
145
docker/tarantool/config.yaml
Normal file
145
docker/tarantool/config.yaml
Normal file
@@ -0,0 +1,145 @@
|
||||
# Tarantool 3 cluster configuration for the SeaweedFS Tarantool filer store:
# one storage replicaset (2 instances, election failover) plus one router.
config:
  context:
    # Passwords are injected from the environment, never stored in the file.
    app_user_password:
      from: env
      env: APP_USER_PASSWORD
    client_user_password:
      from: env
      env: CLIENT_USER_PASSWORD
    replicator_user_password:
      from: env
      env: REPLICATOR_USER_PASSWORD
    storage_user_password:
      from: env
      env: STORAGE_USER_PASSWORD

credentials:
  roles:
    # Minimal CRUD surface used by the filer client over iproto.
    crud-role:
      privileges:
        - permissions: [ "execute" ]
          lua_call: [ "crud.delete", "crud.get", "crud.upsert" ]
  users:
    # 'app' is the unprivileged user the filer connects as.
    app:
      password: '{{ context.app_user_password }}'
      roles: [ public, crud-role ]
    # 'client' is an administrative user (full access).
    client:
      password: '{{ context.client_user_password }}'
      roles: [ super ]
    # Internal users: replication traffic and router->storage sharding calls.
    replicator:
      password: '{{ context.replicator_user_password }}'
      roles: [ replication ]
    storage:
      password: '{{ context.storage_user_password }}'
      roles: [ sharding ]

iproto:
  advertise:
    peer:
      login: replicator
    sharding:
      login: storage

sharding:
  bucket_count: 10000

metrics:
  include: [ all ]
  # vinyl engine is unused here, so its metrics are noise.
  exclude: [ vinyl ]
  labels:
    alias: '{{ instance_name }}'


groups:
  storages:
    roles:
      - roles.crud-storage
      - roles.expirationd
      - roles.metrics-export
    roles_cfg:
      roles.expirationd:
        cfg:
          metrics: true
        # Background TTL sweep over filer_metadata, driven by the
        # expire_at_idx index and the Lua predicate in storage.lua.
        filer_metadata_task:
          space: filer_metadata
          is_expired: filer_metadata.is_expired
          options:
            atomic_iteration: true
            force: true
            index: 'expire_at_idx'
            # GT from start_key 0 skips tuples with expire_at == 0 (no TTL).
            iterator_type: GT
            start_key:
              - 0
            tuples_per_iteration: 10000
    app:
      module: storage
    sharding:
      roles: [ storage ]
    replication:
      failover: election
    database:
      use_mvcc_engine: true
    replicasets:
      storage-001:
        instances:
          storage-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8081'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3301
              advertise:
                client: 127.0.0.1:3301
          storage-001-b:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8082'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3302
              advertise:
                client: 127.0.0.1:3302
  routers:
    roles:
      - roles.crud-router
      - roles.metrics-export
    roles_cfg:
      roles.crud-router:
        stats: true
        stats_driver: metrics
        stats_quantiles: true
    app:
      module: router
    sharding:
      roles: [ router ]
    replicasets:
      router-001:
        instances:
          router-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8083'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3303
              advertise:
                client: 127.0.0.1:3303
|
7
docker/tarantool/instances.yaml
Normal file
7
docker/tarantool/instances.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
# Instances started on this node; all settings come from config.yaml.
storage-001-a:

storage-001-b:

router-001-a:
|
||||
|
77
docker/tarantool/router.lua
Normal file
77
docker/tarantool/router.lua
Normal file
@@ -0,0 +1,77 @@
|
||||
local vshard = require('vshard')
local log = require('log')

-- Retry vshard router bootstrap until the storage replicasets are
-- reachable. bootstrap() is a no-op once the cluster is bootstrapped
-- (if_not_bootstrapped), so looping here is safe.
local bootstrapped = false
repeat
    local ok, err = vshard.router.bootstrap({ if_not_bootstrapped = true })
    if ok then
        bootstrapped = true
    else
        log.info(('Router bootstrap error: %s'):format(err))
    end
until bootstrapped
|
||||
|
||||
-- Router-side fan-out helpers for the filer_metadata space.
-- Each function broadcasts the call to every storage replicaset via
-- vshard and merges the per-shard results.
local filer_metadata = {

    -- Delete every entry stored under `directory`, on all shards.
    -- Raises on the first shard failure; returns true otherwise.
    delete_by_directory_idx = function(directory)
        local replicasets = require('vshard').router.routeall()
        for _, replicaset in pairs(replicasets) do
            local _, err = replicaset:callrw('filer_metadata.delete_by_directory_idx', { directory })
            if err then
                error("Failed to call function on storage: " .. tostring(err))
            end
        end
        return true
    end,

    -- List entries of `dirPath` whose name is > startFileName
    -- (>= when includeStartFile), merged across shards, sorted by
    -- name and capped at `limit`.
    find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
        local merged = {}
        local replicasets = require('vshard').router.routeall()
        for _, replicaset in pairs(replicasets) do
            local batch, err = replicaset:callro('filer_metadata.find_by_directory_idx_and_name', {
                dirPath,
                startFileName,
                includeStartFile,
                limit,
            })
            if err then
                error("Failed to call function on storage: " .. tostring(err))
            end
            for _, tuple in ipairs(batch) do
                merged[#merged + 1] = tuple
            end
        end
        -- Tuple field 3 is the entry name; the global listing must be
        -- name-ordered even though each shard returned a sorted batch.
        table.sort(merged, function(a, b) return a[3] < b[3] end)
        -- Re-apply the limit after merging all shards.
        if #merged > limit then
            local capped = {}
            for i = 1, limit do
                capped[i] = merged[i]
            end
            merged = capped
        end
        return merged
    end,
}
|
||||
|
||||
-- Expose the helpers globally and register them in the schema so the
-- 'app' and 'client' users may execute them over iproto.
rawset(_G, 'filer_metadata', filer_metadata)

for name in pairs(filer_metadata) do
    local fq_name = 'filer_metadata.' .. name
    box.schema.func.create(fq_name, { if_not_exists = true })
    for _, user in ipairs({ 'app', 'client' }) do
        box.schema.user.grant(user, 'execute', 'function', fq_name, { if_not_exists = true })
    end
end
|
97
docker/tarantool/storage.lua
Normal file
97
docker/tarantool/storage.lua
Normal file
@@ -0,0 +1,97 @@
|
||||
-- (Re)create spaces and indexes whenever this instance becomes writable.
-- All DDL below uses if_not_exists, so re-running on every box.status
-- change is idempotent.
box.watch('box.status', function()
    -- DDL only runs on the leader; replicas receive it via replication.
    if box.info.ro then
        return
    end

    -- Small helper: every index here is a TREE index, so only the
    -- name, parts and uniqueness vary.
    local function add_index(space, name, parts, unique)
        space:create_index(name, {
            type = 'tree',
            parts = parts,
            unique = unique,
            if_not_exists = true,
        })
    end

    -- ====================================
    -- key_value space
    -- ====================================
    box.schema.create_space('key_value', {
        format = {
            { name = 'key',       type = 'string' },
            { name = 'bucket_id', type = 'unsigned' },
            { name = 'value',     type = 'string' },
        },
        if_not_exists = true,
    })
    add_index(box.space.key_value, 'id', { 'key' }, true)
    add_index(box.space.key_value, 'bucket_id', { 'bucket_id' }, false)

    -- ====================================
    -- filer_metadata space
    -- ====================================
    box.schema.create_space('filer_metadata', {
        format = {
            { name = 'directory', type = 'string' },
            { name = 'bucket_id', type = 'unsigned' },
            { name = 'name',      type = 'string' },
            { name = 'expire_at', type = 'unsigned' },
            { name = 'data',      type = 'string' },
        },
        if_not_exists = true,
    })
    -- Primary key is (directory, name); the remaining indexes back
    -- sharding, directory scans, name lookups and TTL expiration.
    add_index(box.space.filer_metadata, 'id', { 'directory', 'name' }, true)
    add_index(box.space.filer_metadata, 'bucket_id', { 'bucket_id' }, false)
    add_index(box.space.filer_metadata, 'directory_idx', { 'directory' }, false)
    add_index(box.space.filer_metadata, 'name_idx', { 'name' }, false)
    add_index(box.space.filer_metadata, 'expire_at_idx', { 'expire_at' }, false)
end)
|
||||
|
||||
-- Storage-side functions for the filer_metadata space.
-- Tuple layout: [1]=directory, [2]=bucket_id, [3]=name, [4]=expire_at, [5]=data.
local filer_metadata = {

    -- Delete every entry stored under `directory`.
    -- Primary keys are collected first and deleted afterwards: deleting
    -- from a space while iterating one of its indexes can invalidate
    -- the iterator and skip tuples in Tarantool.
    delete_by_directory_idx = function(directory)
        local space = box.space.filer_metadata
        local index = space.index.directory_idx
        local keys = {}
        for _, tuple in index:pairs({ directory }, { iterator = 'EQ' }) do
            -- primary key is (directory, name)
            keys[#keys + 1] = { tuple[1], tuple[3] }
        end
        for _, key in ipairs(keys) do
            space:delete(key)
        end
        return true
    end,

    -- Return up to `limit` entries of `dirPath` whose name is
    -- > startFileName (>= when includeStartFile is true), sorted by name.
    find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
        local space = box.space.filer_metadata
        local directory_idx = space.index.directory_idx
        -- Choose the name predicate once, outside the scan loop.
        local matches
        if includeStartFile then
            matches = function(value) return value >= startFileName end
        else
            matches = function(value) return value > startFileName end
        end
        local results = {}
        for _, tuple in directory_idx:pairs({ dirPath }, { iterator = 'EQ' }) do
            if matches(tuple[3]) then
                results[#results + 1] = tuple
            end
        end
        -- Field 3 is the entry name; listings must be name-ordered.
        table.sort(results, function(a, b) return a[3] < b[3] end)
        -- Truncate to the requested page size.
        if #results > limit then
            local limited = {}
            for i = 1, limit do
                limited[i] = results[i]
            end
            results = limited
        end
        return results
    end,

    -- expirationd predicate: a tuple is expired when expire_at
    -- (field 4) is non-zero and lies in the past.
    is_expired = function(args, tuple)
        return (tuple[4] > 0) and (require('fiber').time() > tuple[4])
    end
}
|
||||
|
||||
-- Publish the functions globally and register them in the schema
-- (setuid, so they run with definer rights) and grant execute to the
-- 'storage' user used for router->storage calls.
rawset(_G, 'filer_metadata', filer_metadata)
for name in pairs(filer_metadata) do
    local fq_name = 'filer_metadata.' .. name
    box.schema.func.create(fq_name, { setuid = true, if_not_exists = true })
    box.schema.user.grant('storage', 'execute', 'function', fq_name, { if_not_exists = true })
end
|
Reference in New Issue
Block a user