diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0989b27..ba0e75d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,7 @@
 - Fix error trace reporting for functions executed with `Server:exec()` (gh-396).
 - Remove pretty-printing of `luatest.log` arguments.
+- Add `cluster` helper as a tool for managing a Tarantool cluster (gh-368).
 
 ## 1.0.1
diff --git a/config.ld b/config.ld
index fa6308d..77e613a 100644
--- a/config.ld
+++ b/config.ld
@@ -10,7 +10,8 @@ file = {
     'luatest/justrun.lua',
     'luatest/cbuilder.lua',
     'luatest/hooks.lua',
-    'luatest/treegen.lua'
+    'luatest/treegen.lua',
+    'luatest/cluster.lua'
 }
 topics = {
     'CHANGELOG.md',
diff --git a/luatest/cluster.lua b/luatest/cluster.lua
new file mode 100644
index 0000000..28ca9bc
--- /dev/null
+++ b/luatest/cluster.lua
@@ -0,0 +1,355 @@
+--- Tarantool 3.0+ cluster management utils.
+--
+-- The helper automatically collects the set of instances from the
+-- provided configuration and sets up a server object for each
+-- configured instance.
+--
+-- @usage
+--
+-- local cluster = Cluster:new(config)
+-- cluster:start()
+-- cluster['instance-001']:exec(<...>)
+-- cluster:each(function(server)
+--     server:exec(<...>)
+-- end)
+--
+-- After setting up a cluster object, the following methods can be
+-- used to interact with it:
+--
+-- * :start() Start the cluster.
+-- * :start_instance() Start a specific instance.
+-- * :stop() Stop the cluster.
+-- * :each() Execute a function on each instance.
+-- * :size() Get the number of instances.
+-- * :drop() Drop the cluster.
+-- * :sync() Sync the configuration and collect a new set of
+--   instances.
+-- * :reload() Reload the configuration.
+--
+-- The module can also be used for testing startup failure cases:
+--
+-- Cluster:startup_error(config, error_message)
+--
+-- @module luatest.cluster
+
+local fun = require('fun')
+local yaml = require('yaml')
+local assertions = require('luatest.assertions')
+local helpers = require('luatest.helpers')
+local hooks = require('luatest.hooks')
+local treegen = require('luatest.treegen')
+local justrun = require('luatest.justrun')
+local server = require('luatest.server')
+
+local Cluster = require('luatest.class').new()
+
+-- Cluster uses a custom __index implementation to support getting
+-- instances from it using `cluster['i-001']`.
+--
+-- Thus, we need to replace the metatable of the class with one that
+-- provides a custom __index method.
+local mt = getmetatable(Cluster)
+mt.__index = function(self, k)
+    local method = rawget(mt, k)
+    if method ~= nil then
+        return method
+    end
+
+    local server_map = rawget(self, '_server_map')
+    if server_map ~= nil and server_map[k] ~= nil then
+        return server_map[k]
+    end
+
+    return rawget(self, k)
+end
+
+local cluster = {
+    _group = {}
+}
+
+function Cluster:inherit(object)
+    setmetatable(object, self)
+    self.__index = self
+    return object
+end
+
+local function init(g)
+    cluster._group = g
+end
+
+-- Stop all the managed instances using :drop().
+local function drop(g)
+    if g._cluster ~= nil then
+        g._cluster:drop()
+    end
+    g._cluster = nil
+end
+
+local function clean(g)
+    assert(g._cluster == nil)
+end
+
+-- {{{ Helpers
+
+-- Collect names of all the instances defined in the config
+-- in the alphabetical order.
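+--
+-- For illustration only: the config shape below is an assumption
+-- inferred from the traversal in this helper and from the
+-- cbuilder-produced configs used in the tests, e.g.
+--
+--   {
+--       groups = {
+--           ['group-a'] = {
+--               replicasets = {
+--                   ['replicaset-x'] = {
+--                       instances = {
+--                           ['instance-x1'] = {},
+--                           ['instance-x2'] = {},
+--                       },
+--                   },
+--               },
+--           },
+--       },
+--   }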
+local function instance_names_from_config(config)
+    local instance_names = {}
+    for _, group in pairs(config.groups or {}) do
+        for _, replicaset in pairs(group.replicasets or {}) do
+            for name, _ in pairs(replicaset.instances or {}) do
+                table.insert(instance_names, name)
+            end
+        end
+    end
+    table.sort(instance_names)
+    return instance_names
+end
+
+-- }}} Helpers
+
+-- {{{ Cluster management
+
+--- Execute a function for each server in the cluster.
+--
+-- @func f Function to execute; receives a server as the first param.
+function Cluster:each(f)
+    fun.iter(self._servers):each(function(iserver)
+        f(iserver)
+    end)
+end
+
+--- Get cluster size.
+--
+-- @return number.
+function Cluster:size()
+    return #self._servers
+end
+
+--- Start all the instances.
+--
+-- @tab[opt] opts Cluster startup options.
+-- @bool[opt] opts.wait_until_ready Wait until servers are ready
+--   (default: true).
+-- @bool[opt] opts.wait_until_running Wait until servers reach the
+--   'running' status (default: same as wait_until_ready).
+function Cluster:start(opts)
+    self:each(function(iserver)
+        iserver:start({wait_until_ready = false})
+    end)
+
+    -- wait_until_ready is true by default.
+    local wait_until_ready = true
+    if opts ~= nil and opts.wait_until_ready ~= nil then
+        wait_until_ready = opts.wait_until_ready
+    end
+
+    if wait_until_ready then
+        self:each(function(iserver)
+            iserver:wait_until_ready()
+        end)
+    end
+
+    -- wait_until_running is equal to wait_until_ready by default.
+    local wait_until_running = wait_until_ready
+    if opts ~= nil and opts.wait_until_running ~= nil then
+        wait_until_running = opts.wait_until_running
+    end
+
+    if wait_until_running then
+        self:each(function(iserver)
+            helpers.retrying({timeout = 60}, function()
+                assertions.assert_equals(iserver:eval('return box.info.status'),
+                                         'running')
+            end)
+        end)
+    end
+end
+
+--- Start the given instance.
+--
+-- @string instance_name Instance name.
+function Cluster:start_instance(instance_name)
+    local iserver = self._server_map[instance_name]
+    assert(iserver ~= nil)
+    iserver:start()
+end
+
+--- Stop the whole cluster.
+function Cluster:stop()
+    for _, iserver in ipairs(self._servers or {}) do
+        iserver:stop()
+    end
+end
+
+--- Drop the cluster's servers.
+function Cluster:drop()
+    for _, iserver in ipairs(self._servers or {}) do
+        iserver:drop()
+    end
+    self._servers = nil
+    self._server_map = nil
+end
+
+--- Sync the cluster object with the new config.
+--
+-- It performs the following actions.
+--
+-- * Write the new config into the config file.
+-- * Update the internal list of instances.
+--
+-- @tab config New config.
+function Cluster:sync(config)
+    assert(type(config) == 'table')
+
+    local instance_names = instance_names_from_config(config)
+
+    treegen.write_file(self._dir, self._config_file_rel, yaml.encode(config))
+
+    for i, name in ipairs(instance_names) do
+        if self._server_map[name] == nil then
+            local iserver = server:new(fun.chain(self._server_opts, {
+                alias = name,
+            }):tomap())
+            table.insert(self._servers, i, iserver)
+            self._server_map[name] = iserver
+        end
+    end
+end
+
+--- Reload configuration on all the instances.
+--
+-- @tab[opt] config New config.
+function Cluster:reload(config)
+    assert(config == nil or type(config) == 'table')
+
+    -- Rewrite the configuration file if a new config is provided.
+    if config ~= nil then
+        treegen.write_file(self._dir, self._config_file_rel,
+                           yaml.encode(config))
+    end
+
+    -- Reload config on all the instances.
+    self:each(function(iserver)
+        -- Assume that all the instances are started.
+        --
+        -- This requirement may be relaxed if needed; it is kept
+        -- for simplicity.
+        assert(iserver.process ~= nil)
+
+        iserver:exec(function()
+            local cfg = require('config')
+
+            cfg:reload()
+        end)
+    end)
+end
+
+--- Create a new Tarantool cluster.
+--
+-- @tab config Cluster configuration.
+-- @tab[opt] server_opts Extra options passed to server:new().
+-- @tab[opt] opts Cluster options.
+-- @string[opt] opts.dir Specific directory for the cluster.
+-- @return table
+function Cluster:new(config, server_opts, opts)
+    local g = cluster._group
+
+    assert(type(config) == 'table')
+    assert(config._config == nil, "Please provide cbuilder:new():config()")
+    assert(g._cluster == nil)
+
+    -- Prepare a temporary directory and write a configuration
+    -- file.
+    local dir = opts and opts.dir or treegen.prepare_directory({}, {})
+    local config_file_rel = 'config.yaml'
+    local config_file = treegen.write_file(dir, config_file_rel,
+                                           yaml.encode(config))
+
+    -- Collect names of all the instances defined in the config
+    -- in the alphabetical order.
+    local instance_names = instance_names_from_config(config)
+
+    assert(next(instance_names) ~= nil, 'No instances in the supplied config')
+
+    -- Generate luatest server options.
+    server_opts = fun.chain({
+        config_file = config_file,
+        chdir = dir,
+        net_box_credentials = {
+            user = 'client',
+            password = 'secret',
+        },
+    }, server_opts or {}):tomap()
+
+    -- Create luatest server objects.
+    local servers = {}
+    local server_map = {}
+    for _, name in ipairs(instance_names) do
+        local iserver = server:new(fun.chain(server_opts, {
+            alias = name,
+        }):tomap())
+        table.insert(servers, iserver)
+        server_map[name] = iserver
+    end
+
+    -- Store a cluster object in 'g'.
+    self._servers = servers
+    self._server_map = server_map
+    self._dir = dir
+    self._config_file_rel = config_file_rel
+    self._server_opts = server_opts
+
+    g._cluster = self
+
+    return self
+end
+
+-- }}} Cluster management
+
+-- {{{ Replicaset that can't start
+
+--- Ensure a cluster startup error.
+--
+-- Starts all the instances of a cluster from the given config and
+-- ensures that each of them fails to start and reports the given
+-- error message.
+--
+-- @tab config Cluster configuration.
+-- @string exp_err Expected error message.
+function Cluster:startup_error(config, exp_err)
+    -- Stub for the linter: self is unused here, but we keep the
+    -- method form to be consistent with Cluster:new().
+    assert(self)
+    assert(type(config) == 'table')
+    assert(config._config == nil, "Please provide cbuilder:new():config()")
+    -- Prepare a temporary directory and write a configuration
+    -- file.
+    local dir = treegen.prepare_directory({}, {})
+    local config_file_rel = 'config.yaml'
+    local config_file = treegen.write_file(dir, config_file_rel,
+                                           yaml.encode(config))
+
+    -- Collect names of all the instances defined in the config
+    -- in the alphabetical order.
+    local instance_names = instance_names_from_config(config)
+
+    for _, name in ipairs(instance_names) do
+        local env = {}
+        local args = {'--name', name, '--config', config_file}
+        local opts = {nojson = true, stderr = true}
+        local res = justrun.tarantool(dir, env, args, opts)
+
+        assertions.assert_equals(res.exit_code, 1)
+        assertions.assert_str_contains(res.stderr, exp_err)
+    end
+end
+
+-- }}} Replicaset that can't start
+
+hooks.before_all_preloaded(init)
+hooks.after_each_preloaded(drop)
+hooks.after_all_preloaded(clean)
+
+return Cluster
diff --git a/luatest/init.lua b/luatest/init.lua
index e572072..e3b2071 100644
--- a/luatest/init.lua
+++ b/luatest/init.lua
@@ -48,6 +48,11 @@ luatest.justrun = require('luatest.justrun')
 -- @see luatest.cbuilder
 luatest.cbuilder = require('luatest.cbuilder')
 
+--- Tarantool cluster management utils.
+--
+-- @see luatest.cluster
+luatest.cluster = require('luatest.cluster')
+
 --- Add before suite hook.
 --
 -- @function before_suite
diff --git a/test/cluster_test.lua b/test/cluster_test.lua
new file mode 100644
index 0000000..51bdf7f
--- /dev/null
+++ b/test/cluster_test.lua
@@ -0,0 +1,256 @@
+local t = require('luatest')
+local cbuilder = require('luatest.cbuilder')
+local cluster = require('luatest.cluster')
+local utils = require('luatest.utils')
+local fio = require('fio')
+
+local g = t.group()
+
+local root = fio.dirname(fio.abspath('test.helpers'))
+
+-- These are extra server opts passed to the cluster.
+-- They are needed for the server to be able to access
+-- luatest.coverage.
+local server_opts = {
+    env = {
+        LUA_PATH = root .. '/?.lua;' ..
+                   root .. '/?/init.lua;' ..
+                   root .. '/.rocks/share/tarantool/?.lua',
+    }
+}
+
+local function assert_instance_running(c, instance, replicaset)
+    local server = c[instance]
+    t.assert(type(server) == 'table')
+
+    t.assert_equals(server:eval('return box.info.name'), instance)
+
+    if replicaset ~= nil then
+        t.assert_equals(server:eval('return box.info.replicaset.name'),
+                        replicaset)
+    end
+end
+
+local function assert_instance_stopped(c, instance)
+    local server = c[instance]
+    t.assert(type(server) == 'table')
+    t.assert_is(server.process, nil)
+end
+
+g.test_start_stop = function()
+    local function assert_instance_is_ro(c, instance, is_ro)
+        local server = c[instance]
+        t.assert(type(server) == 'table')
+
+        t.assert_equals(server:eval('return box.info.ro'), is_ro)
+    end
+
+    t.run_only_if(utils.version_current_ge_than(3, 0, 0),
+                  [[Declarative configuration works on Tarantool 3.0.0+.
+                  See tarantool/tarantool@13149d65bc9d for details]])
+
+    local config = cbuilder:new()
+        :use_group('group-a')
+        :use_replicaset('replicaset-x')
+        :set_replicaset_option('replication.failover', 'manual')
+        :set_replicaset_option('leader', 'instance-x1')
+        :add_instance('instance-x1', {})
+        :add_instance('instance-x2', {})
+
+        :use_group('group-b')
+        :use_replicaset('replicaset-y')
+        :set_replicaset_option('replication.failover', 'manual')
+        :set_replicaset_option('leader', 'instance-y1')
+        :add_instance('instance-y1', {})
+        :add_instance('instance-y2', {})
+
+        :config()
+
+    local c = cluster:new(config, server_opts)
+    c:start()
+
+    assert_instance_running(c, 'instance-x1', 'replicaset-x')
+    assert_instance_running(c, 'instance-x2', 'replicaset-x')
+    assert_instance_running(c, 'instance-y1', 'replicaset-y')
+    assert_instance_running(c, 'instance-y2', 'replicaset-y')
+
+    assert_instance_is_ro(c, 'instance-x1', false)
+    assert_instance_is_ro(c, 'instance-x2', true)
+    assert_instance_is_ro(c, 'instance-y1', false)
+    assert_instance_is_ro(c, 'instance-y2', true)
+
+    c:stop()
+
+    assert_instance_stopped(c, 'instance-x1')
+    assert_instance_stopped(c, 'instance-x2')
+    assert_instance_stopped(c, 'instance-y1')
+    assert_instance_stopped(c, 'instance-y2')
+end
+
+g.test_start_instance = function()
+    t.run_only_if(utils.version_current_ge_than(3, 0, 0),
+                  [[Declarative configuration works on Tarantool 3.0.0+.
+                  See tarantool/tarantool@13149d65bc9d for details]])
+
+    t.assert_equals(g.cluster, nil)
+
+    local config = cbuilder:new()
+        :use_group('g-001')
+        :use_replicaset('r-001')
+        :add_instance('i-001', {})
+        :use_replicaset('r-002')
+        :add_instance('i-002', {})
+
+        :use_group('g-002')
+        :use_replicaset('r-003')
+        :add_instance('i-003', {})
+
+        :config()
+
+    local c = cluster:new(config, server_opts)
+
+    t.assert_equals(c:size(), 3)
+    c:start_instance('i-002')
+
+    assert_instance_running(c, 'i-002')
+
+    assert_instance_stopped(c, 'i-001')
+    assert_instance_stopped(c, 'i-003')
+
+    c:stop()
+
+    assert_instance_stopped(c, 'i-002')
+end
+
+g.test_sync = function()
+    t.run_only_if(utils.version_current_ge_than(3, 0, 0),
+                  [[Declarative configuration works on Tarantool 3.0.0+.
+                  See tarantool/tarantool@13149d65bc9d for details]])
+
+    t.assert_equals(g._cluster, nil)
+
+    local config = cbuilder:new()
+        :use_group('g-001')
+        :use_replicaset('r-001')
+        :add_instance('i-001', {})
+        :config()
+
+    local c = cluster:new(config, server_opts)
+
+    t.assert_equals(c:size(), 1)
+
+    c:start()
+    assert_instance_running(c, 'i-001')
+
+    c:stop()
+    assert_instance_stopped(c, 'i-001')
+
+    local config2 = cbuilder:new()
+        :use_group('g-001')
+        :use_replicaset('r-001')
+        :add_instance('i-002', {})
+
+        :use_group('g-002')
+        :use_replicaset('r-002')
+        :add_instance('i-003', {})
+
+        :config()
+
+    c:sync(config2)
+
+    t.assert_equals(c:size(), 3)
+
+    c:start_instance('i-002')
+    c:start_instance('i-003')
+    assert_instance_running(c, 'i-002')
+    assert_instance_running(c, 'i-003')
+
+    c:stop()
+    assert_instance_stopped(c, 'i-002')
+    assert_instance_stopped(c, 'i-003')
+end
+
+g.test_reload = function()
+    t.run_only_if(utils.version_current_ge_than(3, 0, 0),
+                  [[Declarative configuration works on Tarantool 3.0.0+.
+                  See tarantool/tarantool@13149d65bc9d for details]])
+
+    local function assert_instance_failover_mode(c, instance, mode)
+        local server = c._server_map[instance]
+        t.assert_equals(
+            server:eval('return require("config"):get("replication.failover")'),
+            mode)
+    end
+
+    t.assert_equals(g._cluster, nil)
+
+    local config = cbuilder:new()
+        :set_global_option('replication.failover', 'election')
+        :use_group('g-001')
+        :use_replicaset('r-001')
+        :add_instance('i-001', {})
+        :add_instance('i-002', {})
+
+        :use_replicaset('r-002')
+        :add_instance('i-003', {})
+
+        :config()
+
+    local c = cluster:new(config, server_opts)
+    c:start()
+
+    assert_instance_failover_mode(c, 'i-001', 'election')
+    assert_instance_failover_mode(c, 'i-002', 'election')
+    assert_instance_failover_mode(c, 'i-003', 'election')
+
+    local config2 = cbuilder:new(config)
+        :set_global_option('replication.failover', 'off')
+        :config()
+
+    c:reload(config2)
+
+    assert_instance_failover_mode(c, 'i-001', 'off')
+    assert_instance_failover_mode(c, 'i-002', 'off')
+    assert_instance_failover_mode(c, 'i-003', 'off')
+end
+
+g.test_each = function()
+    t.run_only_if(utils.version_current_ge_than(3, 0, 0),
+                  [[Declarative configuration works on Tarantool 3.0.0+.
+                  See tarantool/tarantool@13149d65bc9d for details]])
+
+    local config = cbuilder:new()
+        :use_group('g-001')
+        :use_replicaset('r-001')
+        :add_instance('i-001', {})
+        :add_instance('i-002', {})
+
+        :use_replicaset('r-002')
+        :add_instance('i-003', {})
+
+        :config()
+
+    local c = cluster:new(config, server_opts)
+
+    local res = {}
+    c:each(function(server)
+        table.insert(res, server.alias)
+    end)
+
+    t.assert_items_equals(res, {'i-001', 'i-002', 'i-003'})
+end
+
+g.test_startup_error = function()
+    t.run_only_if(utils.version_current_ge_than(3, 0, 0),
+                  [[Declarative configuration works on Tarantool 3.0.0+.
+                  See tarantool/tarantool@13149d65bc9d for details]])
+
+    local config = cbuilder:new()
+        :use_group('g-001')
+        :use_replicaset('r-001')
+        :add_instance('i-001', {})
+        :set_global_option('app.file', 'non-existent.lua')
+        :config()
+
+    cluster:startup_error(config, 'No such file')
+end
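
A minimal usage sketch (not part of the patch; the group, replicaset, and instance names are illustrative and follow the tests above): a test builds a config with cbuilder, creates and starts a cluster, and relies on the after-each hook registered by the module to drop it.

local t = require('luatest')
local cbuilder = require('luatest.cbuilder')
local cluster = require('luatest.cluster')

local g = t.group()

g.test_example = function()
    local config = cbuilder:new()
        :use_group('g-001')
        :use_replicaset('r-001')
        :add_instance('i-001', {})
        :config()

    local c = cluster:new(config)
    c:start()

    -- Instances are reachable by name thanks to the custom __index.
    c['i-001']:exec(function()
        assert(box.info.status == 'running')
    end)

    -- No explicit c:drop() is needed here: the module registers an
    -- after-each hook that drops the cluster automatically.
end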