Skip to content

Commit

Permalink
cleanup: don't pass around unused metrics_dir config
Browse files Browse the repository at this point in the history
  • Loading branch information
hlolli committed Jan 15, 2024
1 parent 94cf4c8 commit 40152b4
Show file tree
Hide file tree
Showing 11 changed files with 41 additions and 83 deletions.
1 change: 0 additions & 1 deletion apps/arweave/include/ar_config.hrl
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,6 @@
sync_from_local_peers_only = false,
data_dir = ".",
log_dir = ?LOG_DIR,
metrics_dir = ?METRICS_DIR,
polling = ?DEFAULT_POLLING_INTERVAL, % Polling frequency in seconds.
block_pollers = ?DEFAULT_BLOCK_POLLERS,
auto_join = true,
Expand Down
6 changes: 1 addition & 5 deletions apps/arweave/src/ar.erl
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,6 @@ show_help() ->
{"log_dir", "The directory for logs. If the \"debug\" flag is set, the debug logs "
"are written to logs/debug_logs/. The RocksDB logs are written to "
"logs/rocksdb/."},
{"metrics_dir", "The directory for persisted metrics."},
{"storage_module", "A storage module is responsible for syncronizing and storing "
"a particular data range. The data and metadata related to the module "
"are stored in a dedicated folder "
Expand Down Expand Up @@ -345,8 +344,6 @@ parse_cli_args(["data_dir", DataDir | Rest], C) ->
parse_cli_args(Rest, C#config{ data_dir = DataDir });
parse_cli_args(["log_dir", Dir | Rest], C) ->
parse_cli_args(Rest, C#config{ log_dir = Dir });
parse_cli_args(["metrics_dir", MetricsDir | Rest], C) ->
parse_cli_args(Rest, C#config{ metrics_dir = MetricsDir });
parse_cli_args(["storage_module", StorageModuleString | Rest], C) ->
StorageModules = C#config.storage_modules,
try
Expand Down Expand Up @@ -638,7 +635,7 @@ start(normal, _Args) ->
prometheus_registry:register_collector(prometheus_process_collector),
prometheus_registry:register_collector(ar_metrics_collector),
%% Register custom metrics.
ar_metrics:register(Config#config.metrics_dir),
ar_metrics:register(),
%% Start other apps which we depend on.
ok = prepare_graphql(),
case Config#config.ipfs_pin of
Expand Down Expand Up @@ -780,7 +777,6 @@ start_for_tests(Config) ->
TestConfig = Config#config{
peers = [],
data_dir = ".tmp/data_test_main_" ++ UniqueName,
metrics_dir = ".tmp/metrics_main_" ++ UniqueName,
port = ar_test_node:get_unused_port(),
disable = [randomx_jit],
packing_rate = 20,
Expand Down
7 changes: 1 addition & 6 deletions apps/arweave/src/ar_config.erl
Original file line number Diff line number Diff line change
Expand Up @@ -137,11 +137,6 @@ parse_options([{<<"log_dir">>, Dir} | Rest], Config) when is_binary(Dir) ->
parse_options([{<<"log_dir">>, Dir} | _], _) ->
{error, {bad_type, log_dir, string}, Dir};

parse_options([{<<"metrics_dir">>, MetricsDir} | Rest], Config) when is_binary(MetricsDir) ->
parse_options(Rest, Config#config { metrics_dir = binary_to_list(MetricsDir) });
parse_options([{<<"metrics_dir">>, MetricsDir} | _], _) ->
{error, {bad_type, metrics_dir, string}, MetricsDir};

parse_options([{<<"storage_modules">>, L} | Rest], Config) when is_list(L) ->
try
StorageModules = [parse_storage_module(Bin) || Bin <- L],
Expand Down Expand Up @@ -629,7 +624,7 @@ parse_atom_number({Name, Number}, Parsed) when is_binary(Name), is_number(Number
maps:put(binary_to_atom(Name), Number, Parsed);
parse_atom_number({Key, Value}, Parsed) ->
?LOG_WARNING([{event, parse_config_bad_type},
{key, io_lib:format("~p", [Key])}, {value, iolib:format("~p", [Value])}]),
{key, io_lib:format("~p", [Key])}, {value, io_lib:format("~p", [Value])}]),
Parsed.

parse_requests_per_minute_limit_by_ip(Input) ->
Expand Down
4 changes: 2 additions & 2 deletions apps/arweave/src/ar_http.erl
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ req(Args, ReestablishedConnection) ->
case ReestablishedConnection of
true ->
ok;
false ->
false ->
%% NOTE: the erlang prometheus client looks at the metric name to determine units.
%% If it sees <name>_duration_<unit> it assumes the observed value is in
			%% native units and it converts it to <unit>. To query native units, use:
Expand All @@ -99,7 +99,7 @@ req(Args, ReestablishedConnection) ->
ar_http_iface_server:label_http_path(list_to_binary(Path)),
ar_metrics:get_status_class(Response)
], EndTime - StartTime)
end,
end,
Response.
%%% ==================================================================
%%% gen_server callbacks.
Expand Down
14 changes: 2 additions & 12 deletions apps/arweave/src/ar_metrics.erl
Original file line number Diff line number Diff line change
@@ -1,18 +1,15 @@
-module(ar_metrics).

-export([register/1, store/1, get_status_class/1]).
-export([register/0, get_status_class/1]).

-include_lib("arweave/include/ar.hrl").
-include_lib("arweave/include/ar_pricing.hrl").
-include_lib("arweave/include/ar_config.hrl").

%%%===================================================================
%%% Public interface.
%%%===================================================================

%% @doc Declare Arweave metrics.
register(MetricsDir) ->
filelib:ensure_dir(MetricsDir ++ "/"),
register() ->
%% Networking.
prometheus_counter:new([
{name, http_server_accepted_bytes_total},
Expand Down Expand Up @@ -452,13 +449,6 @@ register(MetricsDir) ->
{labels, [process, type]},
{help, "Sampling info about active processes. Only set when debug=true."}]).

%% @doc Store the given metric in a file.
store(Name) ->
{ok, Config} = application:get_env(arweave, config),
ar_storage:write_term(Config#config.metrics_dir, Name, prometheus_gauge:value(Name)).



%% @doc Return the HTTP status class label for cowboy_requests_total and gun_requests_total
%% metrics.
get_status_class({ok, {{Status, _}, _, _, _, _}}) ->
Expand Down
26 changes: 14 additions & 12 deletions apps/arweave/src/ar_packing_server.erl
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
-behaviour(gen_server).

-export([start_link/0, packing_atom/1,
request_unpack/2, request_repack/2, pack/4, unpack/5, repack/6,
request_unpack/2, request_repack/2, pack/4, unpack/5, repack/6,
is_buffer_full/0, record_buffer_size_metric/0]).

-export([init/1, handle_cast/2, handle_call/3, handle_info/2, terminate/2]).
Expand Down Expand Up @@ -146,7 +146,7 @@ init([]) ->
ar:console("~nSetting the packing chunk cache size limit to ~B chunks.~n", [MaxSize]),
ets:insert(?MODULE, {buffer_size_limit, MaxSize}),
timer:apply_interval(200, ?MODULE, record_buffer_size_metric, []),
{ok, #state{
{ok, #state{
workers = Workers, num_workers = SpawnSchedulers }}.

handle_call(Request, _From, State) ->
Expand Down Expand Up @@ -288,7 +288,7 @@ worker(ThrottleDelay, RandomXStateRef) ->
worker(ThrottleDelay, RandomXStateRef);
{repack, Ref, From, Args} ->
{RequestedPacking, Packing, Chunk, AbsoluteOffset, TXRoot, ChunkSize} = Args,
case repack(RequestedPacking, Packing,
case repack(RequestedPacking, Packing,
AbsoluteOffset, TXRoot, Chunk, ChunkSize, RandomXStateRef, internal) of
{ok, Packed, Unpacked} ->
From ! {chunk, {packed, Ref, {RequestedPacking, Packed, AbsoluteOffset, TXRoot,
Expand Down Expand Up @@ -320,15 +320,15 @@ worker(ThrottleDelay, RandomXStateRef) ->
worker(ThrottleDelay, RandomXStateRef)
end.

chunk_key(spora_2_5, ChunkOffset, TXRoot) ->
chunk_key(spora_2_5, ChunkOffset, TXRoot) ->
%% The presence of the absolute end offset in the key makes sure
%% packing of every chunk is unique, even when the same chunk is
%% present in the same transaction or across multiple transactions
%% or blocks. The presence of the transaction root in the key
%% ensures one cannot find data that has certain patterns after
%% packing.
{spora_2_5, crypto:hash(sha256, << ChunkOffset:256, TXRoot/binary >>)};
chunk_key({spora_2_6, RewardAddr}, ChunkOffset, TXRoot) ->
chunk_key({spora_2_6, RewardAddr}, ChunkOffset, TXRoot) ->
%% The presence of the absolute end offset in the key makes sure
%% packing of every chunk is unique, even when the same chunk is
%% present in the same transaction or across multiple transactions
Expand Down Expand Up @@ -389,30 +389,30 @@ unpack(PackingArgs, ChunkOffset, TXRoot, Chunk, ChunkSize,
repack(unpacked, unpacked,
_ChunkOffset, _TXRoot, Chunk, _ChunkSize, _RandomXStateRef, _External) ->
{ok, Chunk, Chunk};
repack(RequestedPacking, unpacked,
repack(RequestedPacking, unpacked,
ChunkOffset, TXRoot, Chunk, _ChunkSize, RandomXStateRef, External) ->
case pack(RequestedPacking, ChunkOffset, TXRoot, Chunk, RandomXStateRef, External) of
{ok, Packed, _} ->
{ok, Packed, Chunk};
Error ->
Error
end;
repack(unpacked, StoredPacking,
repack(unpacked, StoredPacking,
ChunkOffset, TXRoot, Chunk, ChunkSize, RandomXStateRef, External) ->
case unpack(StoredPacking, ChunkOffset, TXRoot, Chunk, ChunkSize, RandomXStateRef, External) of
{ok, Unpacked, _} ->
{ok, Unpacked, Unpacked};
Error ->
Error
end;
repack(RequestedPacking, StoredPacking,
repack(RequestedPacking, StoredPacking,
_ChunkOffset, _TXRoot, Chunk, _ChunkSize, _RandomXStateRef, _External)
when StoredPacking == RequestedPacking ->
%% StoredPacking and Packing are in the same format and neither is unpacked. To
%% StoredPacking and Packing are in the same format and neither is unpacked. To
	%% avoid unnecessary unpacking we'll return none for the UnpackedChunk. If a caller
	%% needs the UnpackedChunk they should call unpack explicitly.
{ok, Chunk, none};
repack(RequestedPacking, StoredPacking,
repack(RequestedPacking, StoredPacking,
ChunkOffset, TXRoot, Chunk, ChunkSize, RandomXStateRef, External) ->
{SourcePacking, UnpackKey} = chunk_key(StoredPacking, ChunkOffset, TXRoot),
{TargetPacking, PackKey} = chunk_key(RequestedPacking, ChunkOffset, TXRoot),
Expand All @@ -421,7 +421,7 @@ repack(RequestedPacking, StoredPacking,
PrometheusLabel = atom_to_list(SourcePacking) ++ "_to_" ++ atom_to_list(TargetPacking),
prometheus_histogram:observe_duration(packing_duration_milliseconds,
[repack, PrometheusLabel, External], fun() ->
ar_mine_randomx:randomx_reencrypt_chunk(SourcePacking, TargetPacking,
ar_mine_randomx:randomx_reencrypt_chunk(SourcePacking, TargetPacking,
RandomXStateRef, UnpackKey, PackKey, Chunk, ChunkSize) end);
Error ->
Error
Expand Down Expand Up @@ -481,7 +481,9 @@ get_packing_latency(PackingStateRef) ->
minimum_run_time(ar_mine_randomx, randomx_decrypt_chunk, [spora_2_5 | Unpack], Repetitions),
minimum_run_time(ar_mine_randomx, randomx_decrypt_chunk, [spora_2_6 | Unpack], Repetitions)}.

record_packing_benchmarks({TheoreticalMaxRate, ChosenRate, Schedulers,
record_packing_benchmarks(
{
TheoreticalMaxRate, ChosenRate, Schedulers,
ActualRatePack_2_5, ActualRatePack_2_6, ActualRateUnpack_2_5,
ActualRateUnpack_2_6}) ->
prometheus_gauge:set(packing_latency_benchmark,
Expand Down
1 change: 0 additions & 1 deletion apps/arweave/test/ar_config_tests.erl
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,6 @@ parse_config() ->
block_gossip_peers = [{159,203,158,108,1984}, {150,150,150,150, 1983}],
data_dir = "some_data_dir",
log_dir = "log_dir",
metrics_dir = "metrics_dir",
storage_modules = [{?PARTITION_SIZE, 0, unpacked},
{?PARTITION_SIZE, 2, {spora_2_6, ExpectedMiningAddr}},
{?PARTITION_SIZE, 100, unpacked},
Expand Down
52 changes: 19 additions & 33 deletions apps/arweave/test/ar_config_tests_config_fixture.json
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,13 @@
"port": 1985,
"data_dir": "some_data_dir",
"log_dir": "log_dir",
"metrics_dir": "metrics_dir",
"storage_modules": ["0,unpacked", "2,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw",
"100,unpacked", "0,1,unpacked",
"14,1000000000000,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw"],
"storage_modules": [
"0,unpacked",
"2,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw",
"100,unpacked",
"0,1,unpacked",
"14,1000000000000,LKC84RnISouGUw4uMQGCpPS9yDC-tIoqM2UVbUIt-Sw"
],
"polling": 10,
"block_pollers": 100,
"no_auto_join": true,
Expand All @@ -42,45 +45,28 @@
"disk_pool_jobs": 2,
"requests_per_minute_limit": 2500,
"requests_per_minute_limit_by_ip": {
"127.0.0.1": {
"chunk": 100000,
"data_sync_record": 1,
"recent_hash_list_diff": 200000,
"default": 100
}
"127.0.0.1": {
"chunk": 100000,
"data_sync_record": 1,
"recent_hash_list_diff": 200000,
"default": 100
}
},
"transaction_blacklists": [
"some_blacklist_1",
"some_blacklist_2"
],
"transaction_blacklists": ["some_blacklist_1", "some_blacklist_2"],
"transaction_blacklist_urls": [
"http://some_blacklist_1",
"http://some_blacklist_2/x"
],
"transaction_whitelists": [
"some_whitelist_1",
"some_whitelist_2"
],
"transaction_whitelist_urls": [
"http://some_whitelist"
],
"transaction_whitelists": ["some_whitelist_1", "some_whitelist_2"],
"transaction_whitelist_urls": ["http://some_whitelist"],
"disk_space": 44,
"disk_space_check_frequency": 10,
"init": true,
"internal_api_secret": "some_very_very_long_secret",
"enable": [
"feature_1",
"feature_2"
],
"disable": [
"feature_3",
"feature_4"
],
"enable": ["feature_1", "feature_2"],
"disable": ["feature_3", "feature_4"],
"gateway": "gateway.localhost",
"custom_domains": [
"domain1.example",
"domain2.example"
],
"custom_domains": ["domain1.example", "domain2.example"],
"webhooks": [
{
"events": ["transaction", "block"],
Expand Down
4 changes: 2 additions & 2 deletions apps/arweave/test/ar_test_node.erl
Original file line number Diff line number Diff line change
Expand Up @@ -78,9 +78,9 @@ try_boot_peer(Node, Retries) ->
filelib:ensure_dir("./.tmp"),
Cmd = io_lib:format(
"erl -noshell -name ~s -pa ~s -setcookie ~s -run ar main debug port ~p " ++
"data_dir .tmp/data_test_~s metrics_dir .tmp/metrics_~s no_auto_join packing_rate 20 " ++
"data_dir .tmp/data_test_~s no_auto_join packing_rate 20 " ++
"> ~s-~s.out 2>&1 &",
[NodeName, string:join(Paths, " "), Cookie, Port, NodeName, NodeName, Node,
[NodeName, string:join(Paths, " "), Cookie, Port, NodeName, Node,
get_node_namespace()]),
os:cmd(Cmd),
case wait_until_node_is_ready(NodeName) of
Expand Down
1 change: 0 additions & 1 deletion nix/generate-config.nix
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,6 @@ pkgs.writeText "config.json" (builtins.toJSON (filterTopLevelNulls {
data_dir = arweaveConfig.dataDir;
log_dir = arweaveConfig.logDir;
storage_modules = arweaveConfig.storageModules;
metrics_dir = arweaveConfig.metricsDir;
start_from_block_index = arweaveConfig.startFromBlockIndex;
transaction_blacklists = arweaveConfig.transactionBlacklists;
transaction_whitelists = arweaveConfig.transactionWhitelists;
Expand Down
8 changes: 0 additions & 8 deletions nix/options.nix
Original file line number Diff line number Diff line change
Expand Up @@ -86,14 +86,6 @@ in
'';
};

metricsDir = mkOption {
type = types.path;
default = "/var/lib/arweave/metrics";
description = ''
Directory path for node metric outputs
'';
};

startFromBlockIndex = mkOption {
type = types.bool;
default = false;
Expand Down

0 comments on commit 40152b4

Please sign in to comment.