Tarantool filer store (#6669)
Co-authored-by: Marat Karimov <m.karimov@digitalms.ru>
This commit is contained in:
17
docker/Dockerfile.tarantool.dev_env
Normal file
17
docker/Dockerfile.tarantool.dev_env
Normal file
@@ -0,0 +1,17 @@
|
||||
# Dev-environment image for the SeaweedFS Tarantool filer store.
# Builds the `app` Tarantool application with the tt CLI so the compose
# test (compose/test-tarantool-filer.yml) can start it with `tt start app -i`.
FROM tarantool/tarantool:3.3.1 AS builder

# install dependencies
# NOTE(review): assumes the base image is Debian/Ubuntu-based (uses apt) — confirm
RUN apt update && \
    apt install -y git unzip cmake tt=2.7.0

# init tt dir structure, create dir for app, create symlink
# tt expects applications under instances.enabled/; the symlink makes
# ./app visible there without copying it twice
RUN tt init && \
    mkdir app && \
    ln -sfn ${PWD}/app/ ${PWD}/instances.enabled/app

# copy cluster configs
COPY tarantool /opt/tarantool/app

# build app
# resolves the rockspec dependencies (crud, expirationd, vshard, ...)
RUN tt build app
|
||||
|
||||
@@ -22,7 +22,7 @@ build: binary
|
||||
build_e2e: binary_race
|
||||
docker build --no-cache -t chrislusf/seaweedfs:e2e -f Dockerfile.e2e .
|
||||
|
||||
go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset
|
||||
go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset,tarantool
|
||||
docker build --build-arg TAGS=$(tags) --no-cache -t chrislusf/seaweedfs:go_build -f Dockerfile.go_build .
|
||||
|
||||
go_build_large_disk:
|
||||
@@ -37,6 +37,9 @@ build_rocksdb_local: build_rocksdb_dev_env
|
||||
build_rocksdb:
|
||||
docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large .
|
||||
|
||||
build_tarantool_dev_env:
|
||||
docker build --no-cache -t chrislusf/tarantool_dev_env -f Dockerfile.tarantool.dev_env .
|
||||
|
||||
s3tests_build:
|
||||
docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests .
|
||||
|
||||
@@ -106,9 +109,12 @@ test_etcd: build
|
||||
|
||||
test_ydb: tags = ydb
|
||||
test_ydb: build
|
||||
export
|
||||
docker compose -f compose/test-ydb-filer.yml -p seaweedfs up
|
||||
|
||||
test_tarantool: tags = tarantool
|
||||
test_tarantool: build_tarantool_dev_env build
|
||||
docker compose -f compose/test-tarantool-filer.yml -p seaweedfs up
|
||||
|
||||
clean:
|
||||
rm ./weed
|
||||
|
||||
|
||||
30
docker/compose/test-tarantool-filer.yml
Normal file
30
docker/compose/test-tarantool-filer.yml
Normal file
@@ -0,0 +1,30 @@
|
||||
version: '3.9'

# End-to-end test harness: a Tarantool dev-env cluster plus a SeaweedFS
# server configured to use the Tarantool filer store.
services:
  tarantool:
    image: chrislusf/tarantool_dev_env
    entrypoint: "tt start app -i"
    environment:
      APP_USER_PASSWORD: "app"
      CLIENT_USER_PASSWORD: "client"
      REPLICATOR_USER_PASSWORD: "replicator"
      STORAGE_USER_PASSWORD: "storage"
    # Host networking: the router listens on 127.0.0.1:3303 directly.
    # Published `ports:` mappings are incompatible with network_mode: host
    # (Compose discards/rejects them), so the former "3303:3303" mapping
    # was removed — it had no effect.
    network_mode: "host"

  s3:
    image: chrislusf/seaweedfs:local
    command: "server -ip=127.0.0.1 -filer -master.volumeSizeLimitMB=16 -volume.max=0 -volume -volume.preStopSeconds=1 -s3 -s3.config=/etc/seaweedfs/s3.json -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=false"
    volumes:
      - ./s3.json:/etc/seaweedfs/s3.json
    environment:
      WEED_LEVELDB2_ENABLED: "false"
      # point the filer at the Tarantool router (see docker/tarantool/config.yaml)
      WEED_TARANTOOL_ENABLED: "true"
      WEED_TARANTOOL_ADDRESS: "127.0.0.1:3303"
      WEED_TARANTOOL_USER: "client"
      WEED_TARANTOOL_PASSWORD: "client"
      WEED_MASTER_VOLUME_GROWTH_COPY_OTHER: 1
    network_mode: "host"
    depends_on:
      - tarantool
|
||||
14
docker/tarantool/app-scm-1.rockspec
Normal file
14
docker/tarantool/app-scm-1.rockspec
Normal file
@@ -0,0 +1,14 @@
|
||||
-- LuaRocks rockspec for the SeaweedFS filer-store Tarantool application.
-- Installed inside the dev-env image via `tt build app` (see the Dockerfile).
package = 'app'
version = 'scm-1'
source = {
    -- nothing to fetch; the rockspec exists only to pull in dependencies
    url = '/dev/null',
}
dependencies = {
    'crud == 1.5.2-1',
    'expirationd == 1.6.0-1',
    'metrics-export-role == 0.3.0-1',
    'vshard == 0.1.32-1'
}
build = {
    -- no compilation step: installing the dependencies is the whole build
    type = 'none';
}
|
||||
145
docker/tarantool/config.yaml
Normal file
145
docker/tarantool/config.yaml
Normal file
@@ -0,0 +1,145 @@
|
||||
# Tarantool 3.x cluster configuration for the SeaweedFS filer store:
# one storage replicaset (two instances, election failover) and one router.
# NOTE(review): indentation reconstructed from the Tarantool config schema.
config:
  context:
    # passwords are injected via environment variables
    # (set in compose/test-tarantool-filer.yml)
    app_user_password:
      from: env
      env: APP_USER_PASSWORD
    client_user_password:
      from: env
      env: CLIENT_USER_PASSWORD
    replicator_user_password:
      from: env
      env: REPLICATOR_USER_PASSWORD
    storage_user_password:
      from: env
      env: STORAGE_USER_PASSWORD

credentials:
  roles:
    # minimal crud surface; presumably the filer only needs
    # delete/get/upsert — verify against the Go tarantool store
    crud-role:
      privileges:
        - permissions: [ "execute" ]
          lua_call: [ "crud.delete", "crud.get", "crud.upsert" ]
  users:
    app:
      password: '{{ context.app_user_password }}'
      roles: [ public, crud-role ]
    client:
      # user the SeaweedFS filer connects as (WEED_TARANTOOL_USER)
      password: '{{ context.client_user_password }}'
      roles: [ super ]
    replicator:
      password: '{{ context.replicator_user_password }}'
      roles: [ replication ]
    storage:
      password: '{{ context.storage_user_password }}'
      roles: [ sharding ]

iproto:
  advertise:
    # credentials other cluster members use to reach this instance
    peer:
      login: replicator
    sharding:
      login: storage

sharding:
  bucket_count: 10000

metrics:
  include: [ all ]
  exclude: [ vinyl ]
  labels:
    alias: '{{ instance_name }}'


groups:
  storages:
    roles:
      - roles.crud-storage
      - roles.expirationd
      - roles.metrics-export
    roles_cfg:
      roles.expirationd:
        cfg:
          metrics: true
        # background task that evicts expired filer_metadata tuples using
        # the is_expired() predicate defined in storage.lua
        filer_metadata_task:
          space: filer_metadata
          is_expired: filer_metadata.is_expired
          options:
            atomic_iteration: true
            force: true
            index: 'expire_at_idx'
            # GT from start_key 0 skips tuples with expire_at == 0 ("never expires")
            iterator_type: GT
            start_key:
              - 0
            tuples_per_iteration: 10000
    app:
      module: storage
    sharding:
      roles: [ storage ]
    replication:
      failover: election
    database:
      use_mvcc_engine: true
    replicasets:
      storage-001:
        instances:
          storage-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8081'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3301
              advertise:
                client: 127.0.0.1:3301
          storage-001-b:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8082'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3302
              advertise:
                client: 127.0.0.1:3302
  routers:
    roles:
      - roles.crud-router
      - roles.metrics-export
    roles_cfg:
      roles.crud-router:
        stats: true
        stats_driver: metrics
        stats_quantiles: true
    app:
      module: router
    sharding:
      roles: [ router ]
    replicasets:
      router-001:
        instances:
          # the SeaweedFS filer connects here (127.0.0.1:3303)
          router-001-a:
            roles_cfg:
              roles.metrics-export:
                http:
                  - listen: '0.0.0.0:8083'
                    endpoints:
                      - path: /metrics/prometheus/
                        format: prometheus
                      - path: /metrics/json
                        format: json
            iproto:
              listen:
                - uri: 127.0.0.1:3303
              advertise:
                client: 127.0.0.1:3303
|
||||
7
docker/tarantool/instances.yaml
Normal file
7
docker/tarantool/instances.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
---
# Instances started by `tt start app`; every instance-level setting lives
# in config.yaml, so the bodies here are intentionally empty.
storage-001-a:

storage-001-b:

router-001-a:
|
||||
|
||||
77
docker/tarantool/router.lua
Normal file
77
docker/tarantool/router.lua
Normal file
@@ -0,0 +1,77 @@
|
||||
local vshard = require('vshard')
local log = require('log')
local fiber = require('fiber')

-- Bootstrap the vshard router, retrying until the storage replicasets are
-- up. The original loop retried immediately on failure, spinning a CPU at
-- 100% while storages were still starting; sleep between attempts instead.
while true do
    local ok, err = vshard.router.bootstrap({
        if_not_bootstrapped = true,
    })
    if ok then
        break
    end
    log.info(('Router bootstrap error: %s'):format(err))
    -- back off before retrying instead of busy-waiting
    fiber.sleep(1)
end
|
||||
|
||||
-- Cluster-wide helpers for the filer_metadata space. Each function fans
-- out to every storage replicaset through the vshard router and delegates
-- to the storage-local functions of the same name (see storage.lua).
local filer_metadata = {
    delete_by_directory_idx = function(directory)
        -- run the storage-local delete on every replicaset (write mode)
        for _, replicaset in pairs(require('vshard').router.routeall()) do
            local _, err = replicaset:callrw('filer_metadata.delete_by_directory_idx', { directory })
            if err then
                error("Failed to call function on storage: " .. tostring(err))
            end
        end
        return true
    end,
    find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
        local results = {}
        -- collect matching tuples from every replicaset (read mode)
        for _, replicaset in pairs(require('vshard').router.routeall()) do
            local result, err = replicaset:callro('filer_metadata.find_by_directory_idx_and_name', {
                dirPath,
                startFileName,
                includeStartFile,
                limit
            })
            if err then
                error("Failed to call function on storage: " .. tostring(err))
            end
            for _, tuple in ipairs(result) do
                results[#results + 1] = tuple
            end
        end
        -- order by file name (tuple field 3) across all replicasets
        table.sort(results, function(a, b) return a[3] < b[3] end)
        -- each storage already applied the limit locally; re-apply it to
        -- the merged result set
        if #results > limit then
            local trimmed = {}
            for i = 1, limit do
                trimmed[i] = results[i]
            end
            results = trimmed
        end
        return results
    end,
}
|
||||
|
||||
-- Expose the helper table globally so the functions are reachable via
-- iproto calls ('filer_metadata.<name>').
rawset(_G, 'filer_metadata', filer_metadata)

-- register functions for filer_metadata space, set grants
for name, _ in pairs(filer_metadata) do
    -- if_not_exists keeps re-running this script on restart idempotent
    box.schema.func.create('filer_metadata.' .. name, { if_not_exists = true })
    box.schema.user.grant('app', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
    box.schema.user.grant('client', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
end
|
||||
97
docker/tarantool/storage.lua
Normal file
97
docker/tarantool/storage.lua
Normal file
@@ -0,0 +1,97 @@
|
||||
-- Create spaces and indexes on the storage instance. box.watch fires the
-- handler on every box.status change; DDL runs only once the instance is
-- writable (i.e. on the replicaset leader) and is replicated to replicas.
box.watch('box.status', function()
    -- replicas are read-only: skip DDL there
    if box.info.ro then
        return
    end

    -- ====================================
    -- key_value space
    -- ====================================
    box.schema.create_space('key_value', {
        format = {
            { name = 'key', type = 'string' },
            { name = 'bucket_id', type = 'unsigned' },
            { name = 'value', type = 'string' }
        },
        if_not_exists = true
    })

    -- create key_value space indexes
    box.space.key_value:create_index('id', {type = 'tree', parts = { 'key' }, unique = true, if_not_exists = true})
    box.space.key_value:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true })

    -- ====================================
    -- filer_metadata space
    -- ====================================
    box.schema.create_space('filer_metadata', {
        format = {
            { name = 'directory', type = 'string' },
            { name = 'bucket_id', type = 'unsigned' },
            { name = 'name', type = 'string' },
            { name = 'expire_at', type = 'unsigned' },
            { name = 'data', type = 'string' }
        },
        if_not_exists = true
    })

    -- create filer_metadata space indexes
    -- primary key is the (directory, name) pair
    box.space.filer_metadata:create_index('id', {type = 'tree', parts = { 'directory', 'name' }, unique = true, if_not_exists = true})
    box.space.filer_metadata:create_index('bucket_id', { type = 'tree', parts = { 'bucket_id' }, unique = false, if_not_exists = true })
    box.space.filer_metadata:create_index('directory_idx', { type = 'tree', parts = { 'directory' }, unique = false, if_not_exists = true })
    box.space.filer_metadata:create_index('name_idx', { type = 'tree', parts = { 'name' }, unique = false, if_not_exists = true })
    -- scanned by the expirationd task configured in config.yaml
    box.space.filer_metadata:create_index('expire_at_idx', { type = 'tree', parts = { 'expire_at' }, unique = false, if_not_exists = true})
end)
|
||||
|
||||
-- Storage-local helpers for the filer_metadata space; invoked from the
-- router via callrw/callro (see router.lua) and by expirationd.
local filer_metadata = {
    delete_by_directory_idx = function(directory)
        local space = box.space.filer_metadata
        -- delete every tuple in the directory; the primary key is the
        -- (directory, name) pair = tuple fields 1 and 3
        for _, tuple in space.index.directory_idx:pairs({ directory }, { iterator = 'EQ' }) do
            space:delete({ tuple[1], tuple[3] })
        end
        return true
    end,
    find_by_directory_idx_and_name = function(dirPath, startFileName, includeStartFile, limit)
        -- name cutoff predicate: inclusive or exclusive of startFileName
        local name_passes
        if includeStartFile then
            name_passes = function(value) return value >= startFileName end
        else
            name_passes = function(value) return value > startFileName end
        end
        -- scan the directory and keep tuples past the cutoff
        local results = {}
        for _, tuple in box.space.filer_metadata.index.directory_idx:pairs({ dirPath }, { iterator = 'EQ' }) do
            if name_passes(tuple[3]) then
                results[#results + 1] = tuple
            end
        end
        -- order by file name (tuple field 3)
        table.sort(results, function(a, b) return a[3] < b[3] end)
        -- truncate to at most `limit` entries
        if #results > limit then
            local trimmed = {}
            for i = 1, limit do
                trimmed[i] = results[i]
            end
            results = trimmed
        end
        return results
    end,
    is_expired = function(args, tuple)
        -- expire_at (field 4) == 0 means "never expires"
        return (tuple[4] > 0) and (require('fiber').time() > tuple[4])
    end
}
|
||||
|
||||
-- register functions for filer_metadata space, set grants
-- Expose the table globally so the functions are reachable via iproto
-- calls ('filer_metadata.<name>') and by expirationd's is_expired lookup.
rawset(_G, 'filer_metadata', filer_metadata)
for name, _ in pairs(filer_metadata) do
    -- setuid runs the function with its creator's privileges;
    -- if_not_exists keeps restarts idempotent
    box.schema.func.create('filer_metadata.' .. name, { setuid = true, if_not_exists = true })
    box.schema.user.grant('storage', 'execute', 'function', 'filer_metadata.' .. name, { if_not_exists = true })
end
|
||||
Reference in New Issue
Block a user