From 024bbcf00c4062d2c449955d74ab935ca6ae656d Mon Sep 17 00:00:00 2001 From: Yosuke Hara Date: Fri, 23 Mar 2018 18:21:08 +0900 Subject: [PATCH 1/9] Resolve leo-project/leofs/issues/941 --- .../snmp/snmpa_storage_0/LEO-STORAGE.bin | Bin 11654 -> 16478 bytes .../snmp/snmpa_storage_0/LEO-STORAGE.mib | 138 ++++++++- .../snmp/snmpa_storage_1/LEO-STORAGE.bin | Bin 11654 -> 34 bytes .../snmp/snmpa_storage_1/LEO-STORAGE.mib | 287 +----------------- .../snmp/snmpa_storage_2/LEO-STORAGE.bin | Bin 11654 -> 34 bytes .../snmp/snmpa_storage_2/LEO-STORAGE.mib | 287 +----------------- .../snmp/snmpa_storage_3/LEO-STORAGE.bin | Bin 11654 -> 34 bytes .../snmp/snmpa_storage_3/LEO-STORAGE.mib | 287 +----------------- .../snmp/snmpa_storage_4/LEO-STORAGE.bin | Bin 11654 -> 34 bytes .../snmp/snmpa_storage_4/LEO-STORAGE.mib | 287 +----------------- .../src/leo_storage_statistics.erl | 220 ++++++++++---- 11 files changed, 291 insertions(+), 1215 deletions(-) mode change 100644 => 120000 apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.bin mode change 100644 => 120000 apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.mib mode change 100644 => 120000 apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.bin mode change 100644 => 120000 apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.mib mode change 100644 => 120000 apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.bin mode change 100644 => 120000 apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.mib mode change 100644 => 120000 apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.bin mode change 100644 => 120000 apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.mib diff --git a/apps/leo_storage/snmp/snmpa_storage_0/LEO-STORAGE.bin b/apps/leo_storage/snmp/snmpa_storage_0/LEO-STORAGE.bin index af506d120d80f1c9aa9769a15d98fa630a57d8fb..ef12a9f4c207363a69558b5137655390e99c9d5b 100644 GIT binary patch literal 16478 zcmd6u+i%-86o={9xnJ6@YZ^4gnhksCE-H{_+4{P+Uf^L!fW+Nvz(|avL>EVrJ-DxX z+dtirvSsN|I+V6-I4O|ABF`az{3snske!C?uEw~Yh5ikY$Fc*;rUoZu^R?_ z;rfQP4_+PIu=U@%C)Qd2*6Ksd_LQmrib;)X4?+DolY%V8^SkccSY zp2OCK`ZUgj;{=1cPb4sTsB_SZ-0Rp@%r6X9jUP8Tn^ z7dPs4rPM2L!G71%evhJXH2lwdU#;jp?SFBg`g7P?-Z@0L6@-b!ZCk{#oNxgvr^8oJ z7u0Fcxp4!>VN2n~wXhRavON76E0dSfHwYbJ1>BeIJRJMhFbZwU4o5*^?fVXUAd8YP z;hyCSKP^#4@gz>=sE03Rx!mhFMpnu zLkBB%KdS3yH>(D2S?yE*)(S?x6<%6?JdooUdalhAIX647CY}VgHCA)yu#(dS_jq8- zn&)yFMzL_Lp@=N*d12bte3l-|>T;>UNdYN4snT62FA{brW6#8zDtrcO)A^8L$#UGt zVSBKWr$d7#UqF-joX*?$S{nF{9YJ`v@7yJ^L24u`VN4F;BU9>~i+&p_=cQK!Mo z^}0zItq5puU_e_{BXjK&SH^@q_T#^s2h)mh_GTJR$2@ZRG)~Q9&DE;xlgGjSc`_MF zoEs}4|&1AcjKYQCud0{VL5CnNt3KDcquzV&GdDho0o(soO>#KvT2a+YK&56+mH0qlu}6v!JFX(^e6D zbw+C+g4i9%TlK+f&Vmm6B>&}@mXG*oAP!z<1GqaNEoQ5`!5uhpCbd?da)k$hN>FVG zO1~vhbJF!tVd_9k+TC>Dr{+`=Ln5S!t%g9xXS@GRERbqqQZ&^Js}{53eXk=1lEw~5ovO$AU%MP;5r=jGdEO2 zh3FwfR5-Wkqcspi1?Uk31kP-FM4B8bNS{GS$O1O?6^F>8g7z3ft6tovW>&3=3fbon znQ`BPnw(J_6~HGDplN4_nx9D<6~1kVue1v`+d890hv}li_7q}6_Z6wRp~9%Z?Lcs) zd7bULQgg#}QDJ)lu^D!k>0gzM;-~=bL4f7`cY1DBny7Gn32_;wl>UvuD2@u?R}f%% zZt1yIX`;gQ4a8+igrUAQn6y#h`xfFeWC~H!Gf1O?_Z@^+n@L5DuvQ=y$nPP@ntU^A zj5YG8AX*S&qntcyl#N7E0euC5)@3hJv#irch4BDlETni+6Dvreg7X7}vytYX)=7CacxMq zr>19+_J6^qn#H>UZvqWbS`Cp_I_#{PX8z1~tCn|!>v6fb?>X`h zK`OrPP(@l=uUu{|eFegqDuZdHxSf_pi5tQ^Lfo#HN{3r%nda5I=5)SyRPJm?WOL((I#uB$TxFl*y(IOb&90>*qwdr{e* zEOO$Zdwz{O$Ts1S4h#$K`0STgn{iu12l!-{JHTyCYq%IC!^$P*QoB-zIo7OAVQ!7q rVKC>qg$K;9@cNCJps$^n1M28%5J{I&I#f$PhQ9;v-uYX2Z+Nm z`2@f0ZA@4Wz+ zCnf||Vkr!!Vnh@`N@_(_K<4vLwicG!yibe`#1WVrCnCA|fjBFOBRIKURC+VBBs)ko zx0DHpa*#FxQN=P2AZm-O9ixy^qF7c614mh6QD$ON4%n0I@>9S8(FK$gN0k(vY^N}l 
zQFQWkg>Imv1gaz`ctj?5D>eXiGqQ+HR#hqi+A>QCY}svPJ&>JTs$ibCngHWwXR|L% kpn$iGX9nt+ZKDG;*iJ!gaVuOw~PrW>#TOM9;ZX^?z%-FuG^QhTU73i$~J9 ze0TXs2EPm^*6nn1^W$jfkY3CkHgh?1RK+){mAcO!NxRl)*P1nlbX^{?kWjKY{LO!v3Wl;aMq(FSeH|4mxEC*-fsvR2M@%VWXpK&P!4^~6D5y9KYb z2K^2azoRgSm;brpU< z;C{jENR@OpKZPyZ6!{Smqm6N$VdbsaM5Y%WjK zrNcay-apRStNJ;6bqVZM!D|r(A)PY`JaSje+R}5#X?kOWXIFlfmct5`a6hYc^L=_7 zR7>x^;<0Z9Gs_oq*@>m++BA{_bDAp_Yi?UBHEIrl=J(X2+?K^>l71|hV=Y-|QO^sK zR(6B)Qr<3>R#Z|Pij}VX#UX3EWR<;ztE?v?8oBnwl@m1!Nao*%ukeyJ_I9(zS2T3# zX7TJ6z(Y^^q=H}EMeC)+cr^Jq9Zt`0r#Iv45he7Cl%0e$ISW6zV(HQKZ4|nE?vRZn zO|qJWR=~aWLnDpHbH-Wd+G-ka1>DVIZmYGJMD6@)?5+Y&a`99w%T{$_meqFoEKsW& zmK-!NrEuzMXi{w@5kY6v|CI}OE}tKl?potF&s=-2Zq#??Evwj6nIiLm$(J@4n`;5mY1jJwgI)Q+|#s)qjO%4{M zXAlzH9KwF*hH9`7J%@;La~nTe12I^DUO+%#X5%B$B1&ft_lm;cMusI&g~%}HnCDsSipXOz$#8U@jr|z#bE(FhX9Lb!T8+DG-2U-194Rh zDgO6Cr8q2rNrFIj3SAsqd~Ri${x4h$l^@X6LwaX?JspmQH>MNpT~a}d$@GyV-zP~4 z+T;?@O?r^Z(2^Mua>8yi8~W2QjvjuaUMw>pifIkHh$rPp=#BmRWCFdVeQdJ+W|sf3 zvnK3W(}2w(F#8|IX}Zb&)`9VU?Jp)Deja|Do{v8buczam#^!mGb)c-yeosx=i`)JB zw`Rrm7Rhu!Q&}xpI!sB9iz?C%H|1lfkPAbJDuahWO7};KVq`3 z+!HWWs`?N%)qyYQ>6fWt)|G$%9M*LDL1JjS;{C%s=*5Sx*|x%gd5nt=%57ny-QN&G Bb`byo diff --git a/apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.bin b/apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.bin new file mode 120000 index 00000000..4a12485b --- /dev/null +++ b/apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.bin @@ -0,0 +1 @@ +../snmpa_storage_0/LEO-STORAGE.bin \ No newline at end of file diff --git a/apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.mib b/apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.mib deleted file mode 100644 index 3b0de492..00000000 --- a/apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.mib +++ /dev/null @@ -1,286 +0,0 @@ -LEO-STORAGE DEFINITIONS ::= BEGIN - -IMPORTS - MODULE-IDENTITY, OBJECT-TYPE, Gauge32, enterprises FROM SNMPv2-SMI - - OBJECT-GROUP FROM SNMPv2-CONF - - DisplayString, TruthValue FROM SNMPv2-TC; - - leofs MODULE-IDENTITY - LAST-UPDATED "201502140000Z" - ORGANIZATION "github.com/leo-project" - CONTACT-INFO - "e-mail:dev@leo-project.org" - DESCRIPTION - "LEO STORAGE SNMP MIB" - REVISION "201502140000Z" - DESCRIPTION - "v1.2" - ::= { enterprises 35450} - -leofsGroups OBJECT IDENTIFIER ::= { leofs 1 } -staticOid OBJECT IDENTIFIER ::= { leofs 34} - --- ===================================== --- Items --- ===================================== --- --- ErlangVM Related Items --- -node-name - OBJECT-TYPE - SYNTAX DisplayString - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Node name" - ::= { staticOid 1 } - -vm-proc-count-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Num of Processes (1min mean)" - ::= { staticOid 2 } - -vm-total-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Memory (1min mean)" - ::= { staticOid 3 } - -vm-system-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "System Memory (1min mean)" - ::= { staticOid 4 } - -vm-procs-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Procs Memory (1min mean)" - ::= { staticOid 5 } - -vm-ets-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "ETS Memory (1min mean)" - ::= { staticOid 6 } - -vm-proc-count-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Num of Processes (5min mean)" - ::= { staticOid 7 } - -vm-total-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS 
read-only - STATUS current - DESCRIPTION "Total Memory (5min mean)" - ::= { staticOid 8 } - -vm-system-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "System Memory (5min mean)" - ::= { staticOid 9 } - -vm-procs-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Procs Memory (5min mean)" - ::= { staticOid 10 } - -vm-ets-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "ETS Memory (5min mean)" - ::= { staticOid 11 } - --- --- Request-related Items --- -req-writes-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Writes during 1min" - ::= { staticOid 12 } - -req-reads-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Reads during 1min" - ::= { staticOid 13 } - -req-deletes-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Deletes during 1min" - ::= { staticOid 14 } - -req-writes-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Writes during 5min" - ::= { staticOid 15 } - -req-reads-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Reads during 5min" - ::= { staticOid 16 } - -req-deletes-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Deletes during 5min" - ::= { staticOid 17 } - --- --- Object Store related Items --- -storage-active-objects - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Acrive Objects during" - ::= { staticOid 18 } - -storage-total-objects - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Objects during" - ::= { staticOid 19 } - -storage-active-objects-sizes - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Acrive Objects Sizes during" - ::= { staticOid 20 } - -storage-total-objects-sizes - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Objects Sizes during" - ::= { staticOid 21 } - --- --- MQ-related items --- -num-of-msg-replicate - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - replication" - ::= { staticOid 22 } - -num-of-msg-sync-vnode - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - sync vnode" - ::= { staticOid 23 } - -num-of-msg-rebalance - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - rebalance" - ::= { staticOid 24 } - ---- ---- Optional VM-related items ---- -vm-used-per-allocated-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Used per allocated memory ratio (1min mean)" - ::= { staticOid 31 } - -vm-allocated-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Allocated memory (1min mean)" - ::= { staticOid 32 } - -vm-used-per-allocated-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Used per allocated memory ratio (5min mean)" - ::= { staticOid 33 } - -vm-allocated-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Allocated memory (5min mean)" - ::= { staticOid 34 } - - --- --- Global Group --- -leofsGroup OBJECT-GROUP - OBJECTS {node-name, - 
vm-proc-count-1m, vm-total-mem-1m, vm-system-mem-1m, vm-procs-mem-1m, vm-ets-mem-1m, - vm-proc-count-5m, vm-total-mem-5m, vm-system-mem-5m, vm-procs-mem-5m, vm-ets-mem-5m, - req-writes-1m, req-reads-1m, req-deletes-1m, - req-writes-5m, req-reads-5m, req-deletes-5m, - storage-active-objects, storage-total-objects, storage-active-objects-sizes, storage-total-objects-sizes, - num-of-msg-replicate, num-of-msg-sync-vnode, num-of-msg-rebalance, - vm-used-per-allocated-mem-1m, vm-allocated-mem-1m, - vm-used-per-allocated-mem-5m, vm-allocated-mem-5m - } - STATUS current - DESCRIPTION "leofs group" - ::= { leofsGroups 1 } - -END diff --git a/apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.mib b/apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.mib new file mode 120000 index 00000000..9d5e2f3b --- /dev/null +++ b/apps/leo_storage/snmp/snmpa_storage_1/LEO-STORAGE.mib @@ -0,0 +1 @@ +../snmpa_storage_0/LEO-STORAGE.mib \ No newline at end of file diff --git a/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.bin b/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.bin deleted file mode 100644 index af506d120d80f1c9aa9769a15d98fa630a57d8fb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11654 zcmd6tU2oeq6o%>VuOw~PrW>#TOM9;ZX^?z%-FuG^QhTU73i$~J9 ze0TXs2EPm^*6nn1^W$jfkY3CkHgh?1RK+){mAcO!NxRl)*P1nlbX^{?kWjKY{LO!v3Wl;aMq(FSeH|4mxEC*-fsvR2M@%VWXpK&P!4^~6D5y9KYb z2K^2azoRgSm;brpU< z;C{jENR@OpKZPyZ6!{Smqm6N$VdbsaM5Y%WjK zrNcay-apRStNJ;6bqVZM!D|r(A)PY`JaSje+R}5#X?kOWXIFlfmct5`a6hYc^L=_7 zR7>x^;<0Z9Gs_oq*@>m++BA{_bDAp_Yi?UBHEIrl=J(X2+?K^>l71|hV=Y-|QO^sK zR(6B)Qr<3>R#Z|Pij}VX#UX3EWR<;ztE?v?8oBnwl@m1!Nao*%ukeyJ_I9(zS2T3# zX7TJ6z(Y^^q=H}EMeC)+cr^Jq9Zt`0r#Iv45he7Cl%0e$ISW6zV(HQKZ4|nE?vRZn zO|qJWR=~aWLnDpHbH-Wd+G-ka1>DVIZmYGJMD6@)?5+Y&a`99w%T{$_meqFoEKsW& zmK-!NrEuzMXi{w@5kY6v|CI}OE}tKl?potF&s=-2Zq#??Evwj6nIiLm$(J@4n`;5mY1jJwgI)Q+|#s)qjO%4{M zXAlzH9KwF*hH9`7J%@;La~nTe12I^DUO+%#X5%B$B1&ft_lm;cMusI&g~%}HnCDsSipXOz$#8U@jr|z#bE(FhX9Lb!T8+DG-2U-194Rh zDgO6Cr8q2rNrFIj3SAsqd~Ri${x4h$l^@X6LwaX?JspmQH>MNpT~a}d$@GyV-zP~4 z+T;?@O?r^Z(2^Mua>8yi8~W2QjvjuaUMw>pifIkHh$rPp=#BmRWCFdVeQdJ+W|sf3 zvnK3W(}2w(F#8|IX}Zb&)`9VU?Jp)Deja|Do{v8buczam#^!mGb)c-yeosx=i`)JB zw`Rrm7Rhu!Q&}xpI!sB9iz?C%H|1lfkPAbJDuahWO7};KVq`3 z+!HWWs`?N%)qyYQ>6fWt)|G$%9M*LDL1JjS;{C%s=*5Sx*|x%gd5nt=%57ny-QN&G Bb`byo diff --git a/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.bin b/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.bin new file mode 120000 index 00000000..4a12485b --- /dev/null +++ b/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.bin @@ -0,0 +1 @@ +../snmpa_storage_0/LEO-STORAGE.bin \ No newline at end of file diff --git a/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.mib b/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.mib deleted file mode 100644 index 3b0de492..00000000 --- a/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.mib +++ /dev/null @@ -1,286 +0,0 @@ -LEO-STORAGE DEFINITIONS ::= BEGIN - -IMPORTS - MODULE-IDENTITY, OBJECT-TYPE, Gauge32, enterprises FROM SNMPv2-SMI - - OBJECT-GROUP FROM SNMPv2-CONF - - DisplayString, TruthValue FROM SNMPv2-TC; - - leofs MODULE-IDENTITY - LAST-UPDATED "201502140000Z" - ORGANIZATION "github.com/leo-project" - CONTACT-INFO - "e-mail:dev@leo-project.org" - DESCRIPTION - "LEO STORAGE SNMP MIB" - REVISION "201502140000Z" - DESCRIPTION - "v1.2" - ::= { enterprises 35450} - -leofsGroups OBJECT IDENTIFIER ::= { leofs 1 } -staticOid OBJECT IDENTIFIER ::= { leofs 34} - --- ===================================== --- Items --- 
===================================== --- --- ErlangVM Related Items --- -node-name - OBJECT-TYPE - SYNTAX DisplayString - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Node name" - ::= { staticOid 1 } - -vm-proc-count-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Num of Processes (1min mean)" - ::= { staticOid 2 } - -vm-total-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Memory (1min mean)" - ::= { staticOid 3 } - -vm-system-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "System Memory (1min mean)" - ::= { staticOid 4 } - -vm-procs-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Procs Memory (1min mean)" - ::= { staticOid 5 } - -vm-ets-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "ETS Memory (1min mean)" - ::= { staticOid 6 } - -vm-proc-count-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Num of Processes (5min mean)" - ::= { staticOid 7 } - -vm-total-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Memory (5min mean)" - ::= { staticOid 8 } - -vm-system-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "System Memory (5min mean)" - ::= { staticOid 9 } - -vm-procs-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Procs Memory (5min mean)" - ::= { staticOid 10 } - -vm-ets-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "ETS Memory (5min mean)" - ::= { staticOid 11 } - --- --- Request-related Items --- -req-writes-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Writes during 1min" - ::= { staticOid 12 } - -req-reads-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Reads during 1min" - ::= { staticOid 13 } - -req-deletes-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Deletes during 1min" - ::= { staticOid 14 } - -req-writes-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Writes during 5min" - ::= { staticOid 15 } - -req-reads-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Reads during 5min" - ::= { staticOid 16 } - -req-deletes-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Deletes during 5min" - ::= { staticOid 17 } - --- --- Object Store related Items --- -storage-active-objects - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Acrive Objects during" - ::= { staticOid 18 } - -storage-total-objects - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Objects during" - ::= { staticOid 19 } - -storage-active-objects-sizes - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Acrive Objects Sizes during" - ::= { staticOid 20 } - -storage-total-objects-sizes - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Objects Sizes during" - ::= { staticOid 21 } - --- --- MQ-related items --- -num-of-msg-replicate - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of 
messages - replication" - ::= { staticOid 22 } - -num-of-msg-sync-vnode - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - sync vnode" - ::= { staticOid 23 } - -num-of-msg-rebalance - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - rebalance" - ::= { staticOid 24 } - ---- ---- Optional VM-related items ---- -vm-used-per-allocated-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Used per allocated memory ratio (1min mean)" - ::= { staticOid 31 } - -vm-allocated-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Allocated memory (1min mean)" - ::= { staticOid 32 } - -vm-used-per-allocated-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Used per allocated memory ratio (5min mean)" - ::= { staticOid 33 } - -vm-allocated-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Allocated memory (5min mean)" - ::= { staticOid 34 } - - --- --- Global Group --- -leofsGroup OBJECT-GROUP - OBJECTS {node-name, - vm-proc-count-1m, vm-total-mem-1m, vm-system-mem-1m, vm-procs-mem-1m, vm-ets-mem-1m, - vm-proc-count-5m, vm-total-mem-5m, vm-system-mem-5m, vm-procs-mem-5m, vm-ets-mem-5m, - req-writes-1m, req-reads-1m, req-deletes-1m, - req-writes-5m, req-reads-5m, req-deletes-5m, - storage-active-objects, storage-total-objects, storage-active-objects-sizes, storage-total-objects-sizes, - num-of-msg-replicate, num-of-msg-sync-vnode, num-of-msg-rebalance, - vm-used-per-allocated-mem-1m, vm-allocated-mem-1m, - vm-used-per-allocated-mem-5m, vm-allocated-mem-5m - } - STATUS current - DESCRIPTION "leofs group" - ::= { leofsGroups 1 } - -END diff --git a/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.mib b/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.mib new file mode 120000 index 00000000..9d5e2f3b --- /dev/null +++ b/apps/leo_storage/snmp/snmpa_storage_2/LEO-STORAGE.mib @@ -0,0 +1 @@ +../snmpa_storage_0/LEO-STORAGE.mib \ No newline at end of file diff --git a/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.bin b/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.bin deleted file mode 100644 index af506d120d80f1c9aa9769a15d98fa630a57d8fb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11654 zcmd6tU2oeq6o%>VuOw~PrW>#TOM9;ZX^?z%-FuG^QhTU73i$~J9 ze0TXs2EPm^*6nn1^W$jfkY3CkHgh?1RK+){mAcO!NxRl)*P1nlbX^{?kWjKY{LO!v3Wl;aMq(FSeH|4mxEC*-fsvR2M@%VWXpK&P!4^~6D5y9KYb z2K^2azoRgSm;brpU< z;C{jENR@OpKZPyZ6!{Smqm6N$VdbsaM5Y%WjK zrNcay-apRStNJ;6bqVZM!D|r(A)PY`JaSje+R}5#X?kOWXIFlfmct5`a6hYc^L=_7 zR7>x^;<0Z9Gs_oq*@>m++BA{_bDAp_Yi?UBHEIrl=J(X2+?K^>l71|hV=Y-|QO^sK zR(6B)Qr<3>R#Z|Pij}VX#UX3EWR<;ztE?v?8oBnwl@m1!Nao*%ukeyJ_I9(zS2T3# zX7TJ6z(Y^^q=H}EMeC)+cr^Jq9Zt`0r#Iv45he7Cl%0e$ISW6zV(HQKZ4|nE?vRZn zO|qJWR=~aWLnDpHbH-Wd+G-ka1>DVIZmYGJMD6@)?5+Y&a`99w%T{$_meqFoEKsW& zmK-!NrEuzMXi{w@5kY6v|CI}OE}tKl?potF&s=-2Zq#??Evwj6nIiLm$(J@4n`;5mY1jJwgI)Q+|#s)qjO%4{M zXAlzH9KwF*hH9`7J%@;La~nTe12I^DUO+%#X5%B$B1&ft_lm;cMusI&g~%}HnCDsSipXOz$#8U@jr|z#bE(FhX9Lb!T8+DG-2U-194Rh zDgO6Cr8q2rNrFIj3SAsqd~Ri${x4h$l^@X6LwaX?JspmQH>MNpT~a}d$@GyV-zP~4 z+T;?@O?r^Z(2^Mua>8yi8~W2QjvjuaUMw>pifIkHh$rPp=#BmRWCFdVeQdJ+W|sf3 zvnK3W(}2w(F#8|IX}Zb&)`9VU?Jp)Deja|Do{v8buczam#^!mGb)c-yeosx=i`)JB zw`Rrm7Rhu!Q&}xpI!sB9iz?C%H|1lfkPAbJDuahWO7};KVq`3 z+!HWWs`?N%)qyYQ>6fWt)|G$%9M*LDL1JjS;{C%s=*5Sx*|x%gd5nt=%57ny-QN&G Bb`byo diff --git 
a/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.bin b/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.bin new file mode 120000 index 00000000..4a12485b --- /dev/null +++ b/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.bin @@ -0,0 +1 @@ +../snmpa_storage_0/LEO-STORAGE.bin \ No newline at end of file diff --git a/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.mib b/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.mib deleted file mode 100644 index 3b0de492..00000000 --- a/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.mib +++ /dev/null @@ -1,286 +0,0 @@ -LEO-STORAGE DEFINITIONS ::= BEGIN - -IMPORTS - MODULE-IDENTITY, OBJECT-TYPE, Gauge32, enterprises FROM SNMPv2-SMI - - OBJECT-GROUP FROM SNMPv2-CONF - - DisplayString, TruthValue FROM SNMPv2-TC; - - leofs MODULE-IDENTITY - LAST-UPDATED "201502140000Z" - ORGANIZATION "github.com/leo-project" - CONTACT-INFO - "e-mail:dev@leo-project.org" - DESCRIPTION - "LEO STORAGE SNMP MIB" - REVISION "201502140000Z" - DESCRIPTION - "v1.2" - ::= { enterprises 35450} - -leofsGroups OBJECT IDENTIFIER ::= { leofs 1 } -staticOid OBJECT IDENTIFIER ::= { leofs 34} - --- ===================================== --- Items --- ===================================== --- --- ErlangVM Related Items --- -node-name - OBJECT-TYPE - SYNTAX DisplayString - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Node name" - ::= { staticOid 1 } - -vm-proc-count-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Num of Processes (1min mean)" - ::= { staticOid 2 } - -vm-total-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Memory (1min mean)" - ::= { staticOid 3 } - -vm-system-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "System Memory (1min mean)" - ::= { staticOid 4 } - -vm-procs-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Procs Memory (1min mean)" - ::= { staticOid 5 } - -vm-ets-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "ETS Memory (1min mean)" - ::= { staticOid 6 } - -vm-proc-count-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Num of Processes (5min mean)" - ::= { staticOid 7 } - -vm-total-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Memory (5min mean)" - ::= { staticOid 8 } - -vm-system-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "System Memory (5min mean)" - ::= { staticOid 9 } - -vm-procs-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Procs Memory (5min mean)" - ::= { staticOid 10 } - -vm-ets-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "ETS Memory (5min mean)" - ::= { staticOid 11 } - --- --- Request-related Items --- -req-writes-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Writes during 1min" - ::= { staticOid 12 } - -req-reads-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Reads during 1min" - ::= { staticOid 13 } - -req-deletes-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Deletes during 1min" - ::= { staticOid 14 } - -req-writes-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Writes 
during 5min" - ::= { staticOid 15 } - -req-reads-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Reads during 5min" - ::= { staticOid 16 } - -req-deletes-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Deletes during 5min" - ::= { staticOid 17 } - --- --- Object Store related Items --- -storage-active-objects - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Acrive Objects during" - ::= { staticOid 18 } - -storage-total-objects - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Objects during" - ::= { staticOid 19 } - -storage-active-objects-sizes - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Acrive Objects Sizes during" - ::= { staticOid 20 } - -storage-total-objects-sizes - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Objects Sizes during" - ::= { staticOid 21 } - --- --- MQ-related items --- -num-of-msg-replicate - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - replication" - ::= { staticOid 22 } - -num-of-msg-sync-vnode - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - sync vnode" - ::= { staticOid 23 } - -num-of-msg-rebalance - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - rebalance" - ::= { staticOid 24 } - ---- ---- Optional VM-related items ---- -vm-used-per-allocated-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Used per allocated memory ratio (1min mean)" - ::= { staticOid 31 } - -vm-allocated-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Allocated memory (1min mean)" - ::= { staticOid 32 } - -vm-used-per-allocated-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Used per allocated memory ratio (5min mean)" - ::= { staticOid 33 } - -vm-allocated-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Allocated memory (5min mean)" - ::= { staticOid 34 } - - --- --- Global Group --- -leofsGroup OBJECT-GROUP - OBJECTS {node-name, - vm-proc-count-1m, vm-total-mem-1m, vm-system-mem-1m, vm-procs-mem-1m, vm-ets-mem-1m, - vm-proc-count-5m, vm-total-mem-5m, vm-system-mem-5m, vm-procs-mem-5m, vm-ets-mem-5m, - req-writes-1m, req-reads-1m, req-deletes-1m, - req-writes-5m, req-reads-5m, req-deletes-5m, - storage-active-objects, storage-total-objects, storage-active-objects-sizes, storage-total-objects-sizes, - num-of-msg-replicate, num-of-msg-sync-vnode, num-of-msg-rebalance, - vm-used-per-allocated-mem-1m, vm-allocated-mem-1m, - vm-used-per-allocated-mem-5m, vm-allocated-mem-5m - } - STATUS current - DESCRIPTION "leofs group" - ::= { leofsGroups 1 } - -END diff --git a/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.mib b/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.mib new file mode 120000 index 00000000..9d5e2f3b --- /dev/null +++ b/apps/leo_storage/snmp/snmpa_storage_3/LEO-STORAGE.mib @@ -0,0 +1 @@ +../snmpa_storage_0/LEO-STORAGE.mib \ No newline at end of file diff --git a/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.bin b/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.bin deleted file mode 100644 index 
af506d120d80f1c9aa9769a15d98fa630a57d8fb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11654 zcmd6tU2oeq6o%>VuOw~PrW>#TOM9;ZX^?z%-FuG^QhTU73i$~J9 ze0TXs2EPm^*6nn1^W$jfkY3CkHgh?1RK+){mAcO!NxRl)*P1nlbX^{?kWjKY{LO!v3Wl;aMq(FSeH|4mxEC*-fsvR2M@%VWXpK&P!4^~6D5y9KYb z2K^2azoRgSm;brpU< z;C{jENR@OpKZPyZ6!{Smqm6N$VdbsaM5Y%WjK zrNcay-apRStNJ;6bqVZM!D|r(A)PY`JaSje+R}5#X?kOWXIFlfmct5`a6hYc^L=_7 zR7>x^;<0Z9Gs_oq*@>m++BA{_bDAp_Yi?UBHEIrl=J(X2+?K^>l71|hV=Y-|QO^sK zR(6B)Qr<3>R#Z|Pij}VX#UX3EWR<;ztE?v?8oBnwl@m1!Nao*%ukeyJ_I9(zS2T3# zX7TJ6z(Y^^q=H}EMeC)+cr^Jq9Zt`0r#Iv45he7Cl%0e$ISW6zV(HQKZ4|nE?vRZn zO|qJWR=~aWLnDpHbH-Wd+G-ka1>DVIZmYGJMD6@)?5+Y&a`99w%T{$_meqFoEKsW& zmK-!NrEuzMXi{w@5kY6v|CI}OE}tKl?potF&s=-2Zq#??Evwj6nIiLm$(J@4n`;5mY1jJwgI)Q+|#s)qjO%4{M zXAlzH9KwF*hH9`7J%@;La~nTe12I^DUO+%#X5%B$B1&ft_lm;cMusI&g~%}HnCDsSipXOz$#8U@jr|z#bE(FhX9Lb!T8+DG-2U-194Rh zDgO6Cr8q2rNrFIj3SAsqd~Ri${x4h$l^@X6LwaX?JspmQH>MNpT~a}d$@GyV-zP~4 z+T;?@O?r^Z(2^Mua>8yi8~W2QjvjuaUMw>pifIkHh$rPp=#BmRWCFdVeQdJ+W|sf3 zvnK3W(}2w(F#8|IX}Zb&)`9VU?Jp)Deja|Do{v8buczam#^!mGb)c-yeosx=i`)JB zw`Rrm7Rhu!Q&}xpI!sB9iz?C%H|1lfkPAbJDuahWO7};KVq`3 z+!HWWs`?N%)qyYQ>6fWt)|G$%9M*LDL1JjS;{C%s=*5Sx*|x%gd5nt=%57ny-QN&G Bb`byo diff --git a/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.bin b/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.bin new file mode 120000 index 00000000..4a12485b --- /dev/null +++ b/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.bin @@ -0,0 +1 @@ +../snmpa_storage_0/LEO-STORAGE.bin \ No newline at end of file diff --git a/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.mib b/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.mib deleted file mode 100644 index 3b0de492..00000000 --- a/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.mib +++ /dev/null @@ -1,286 +0,0 @@ -LEO-STORAGE DEFINITIONS ::= BEGIN - -IMPORTS - MODULE-IDENTITY, OBJECT-TYPE, Gauge32, enterprises FROM SNMPv2-SMI - - OBJECT-GROUP FROM SNMPv2-CONF - - DisplayString, TruthValue FROM SNMPv2-TC; - - leofs MODULE-IDENTITY - LAST-UPDATED "201502140000Z" - ORGANIZATION "github.com/leo-project" - CONTACT-INFO - "e-mail:dev@leo-project.org" - DESCRIPTION - "LEO STORAGE SNMP MIB" - REVISION "201502140000Z" - DESCRIPTION - "v1.2" - ::= { enterprises 35450} - -leofsGroups OBJECT IDENTIFIER ::= { leofs 1 } -staticOid OBJECT IDENTIFIER ::= { leofs 34} - --- ===================================== --- Items --- ===================================== --- --- ErlangVM Related Items --- -node-name - OBJECT-TYPE - SYNTAX DisplayString - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Node name" - ::= { staticOid 1 } - -vm-proc-count-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Num of Processes (1min mean)" - ::= { staticOid 2 } - -vm-total-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Memory (1min mean)" - ::= { staticOid 3 } - -vm-system-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "System Memory (1min mean)" - ::= { staticOid 4 } - -vm-procs-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Procs Memory (1min mean)" - ::= { staticOid 5 } - -vm-ets-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "ETS Memory (1min mean)" - ::= { staticOid 6 } - -vm-proc-count-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Num of Processes (5min mean)" - ::= { staticOid 7 } - 
-vm-total-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Memory (5min mean)" - ::= { staticOid 8 } - -vm-system-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "System Memory (5min mean)" - ::= { staticOid 9 } - -vm-procs-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Procs Memory (5min mean)" - ::= { staticOid 10 } - -vm-ets-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "ETS Memory (5min mean)" - ::= { staticOid 11 } - --- --- Request-related Items --- -req-writes-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Writes during 1min" - ::= { staticOid 12 } - -req-reads-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Reads during 1min" - ::= { staticOid 13 } - -req-deletes-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Deletes during 1min" - ::= { staticOid 14 } - -req-writes-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Writes during 5min" - ::= { staticOid 15 } - -req-reads-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Reads during 5min" - ::= { staticOid 16 } - -req-deletes-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of Deletes during 5min" - ::= { staticOid 17 } - --- --- Object Store related Items --- -storage-active-objects - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Acrive Objects during" - ::= { staticOid 18 } - -storage-total-objects - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Objects during" - ::= { staticOid 19 } - -storage-active-objects-sizes - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Acrive Objects Sizes during" - ::= { staticOid 20 } - -storage-total-objects-sizes - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total Objects Sizes during" - ::= { staticOid 21 } - --- --- MQ-related items --- -num-of-msg-replicate - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - replication" - ::= { staticOid 22 } - -num-of-msg-sync-vnode - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - sync vnode" - ::= { staticOid 23 } - -num-of-msg-rebalance - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Total of messages - rebalance" - ::= { staticOid 24 } - ---- ---- Optional VM-related items ---- -vm-used-per-allocated-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Used per allocated memory ratio (1min mean)" - ::= { staticOid 31 } - -vm-allocated-mem-1m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Allocated memory (1min mean)" - ::= { staticOid 32 } - -vm-used-per-allocated-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Used per allocated memory ratio (5min mean)" - ::= { staticOid 33 } - -vm-allocated-mem-5m - OBJECT-TYPE - SYNTAX Gauge32 - MAX-ACCESS read-only - STATUS current - DESCRIPTION "Allocated memory (5min mean)" - ::= { staticOid 34 } - - --- --- Global Group 
--- -leofsGroup OBJECT-GROUP - OBJECTS {node-name, - vm-proc-count-1m, vm-total-mem-1m, vm-system-mem-1m, vm-procs-mem-1m, vm-ets-mem-1m, - vm-proc-count-5m, vm-total-mem-5m, vm-system-mem-5m, vm-procs-mem-5m, vm-ets-mem-5m, - req-writes-1m, req-reads-1m, req-deletes-1m, - req-writes-5m, req-reads-5m, req-deletes-5m, - storage-active-objects, storage-total-objects, storage-active-objects-sizes, storage-total-objects-sizes, - num-of-msg-replicate, num-of-msg-sync-vnode, num-of-msg-rebalance, - vm-used-per-allocated-mem-1m, vm-allocated-mem-1m, - vm-used-per-allocated-mem-5m, vm-allocated-mem-5m - } - STATUS current - DESCRIPTION "leofs group" - ::= { leofsGroups 1 } - -END diff --git a/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.mib b/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.mib new file mode 120000 index 00000000..9d5e2f3b --- /dev/null +++ b/apps/leo_storage/snmp/snmpa_storage_4/LEO-STORAGE.mib @@ -0,0 +1 @@ +../snmpa_storage_0/LEO-STORAGE.mib \ No newline at end of file diff --git a/apps/leo_storage/src/leo_storage_statistics.erl b/apps/leo_storage/src/leo_storage_statistics.erl index 61eab903..31c10fb0 100644 --- a/apps/leo_storage/src/leo_storage_statistics.erl +++ b/apps/leo_storage/src/leo_storage_statistics.erl @@ -2,7 +2,7 @@ %% %% LeoStorage %% -%% Copyright (c) 2012-2017 Rakuten, Inc. +%% Copyright (c) 2012-2018 Rakuten, Inc. %% %% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file @@ -40,13 +40,27 @@ %% callback -export([handle_notify/0]). --define(SNMP_MSG_REPLICATE, 'num-of-msg-replicate'). --define(SNMP_MSG_SYNC_VNODE, 'num-of-msg-sync-vnode'). --define(SNMP_MSG_REBALANCE, 'num-of-msg-rebalance'). --define(SNMP_MSG_ACTIVE_SIZE, 'storage-active-objects-sizes'). --define(SNMP_MSG_ACTIVE_OBJS, 'storage-active-objects'). --define(SNMP_MSG_TOTAL_SIZE, 'storage-total-objects-sizes'). --define(SNMP_MSG_TOTAL_OBJS, 'storage-total-objects'). +-define(SNMP_MQ_NUM_OF_REPLICATION, 'mq-num-of-msg-replicate'). +-define(SNMP_MQ_NUM_OF_SYNC_VNODE, 'mq-num-of-msg-sync-vnode'). +-define(SNMP_MQ_NUM_OF_REBALANCE, 'mq-num-of-msg-rebalance'). +-define(SNMP_MQ_NUM_OF_RECOVERY_NODE, 'mq-num-of-msg-recovery-node'). +-define(SNMP_MQ_NUM_OF_DEL_DIR, 'mq-num-of-msg-deletion-dir'). +-define(SNMP_MQ_NUM_OF_ASYNC_DEL_DIR, 'mq-num-of-msg-async-deletion-dir'). +-define(SNMP_MQ_NUM_OF_REQ_DEL_DIR, 'mq-num-of-msg-req-deletion-dir'). +-define(SNMP_MQ_MDCR_NUM_OF_COMP_METADATA, 'mq-mdcr-num-of-msg-req-comp-metadata'). +-define(SNMP_MQ_MDCR_NUM_OF_REQ_SYNC_OBJ, 'mq-mdcr-num-of-msg-req-sync-obj'). + +-define(SNMP_COMP_STATE, 'comp-state'). +-define(SNMP_COMP_LAST_START_DATETIME, 'comp-last-start-datetime'). +-define(SNMP_COMP_LAST_END_DATETIME, 'comp-last-end-datetime'). +-define(SNMP_COMP_NUM_OF_PENDING_TARGETS, 'comp-num-of-pending-targets'). +-define(SNMP_COMP_NUM_OF_ONGOING_TARGETS, 'comp-num-of-ongoing-targets'). +-define(SNMP_COMP_NUM_OF_OUT_OF_TARGETS, 'comp-num-of-out-of-targets'). + +-define(SNMP_STORAGE_ACTIVE_SIZE, 'storage-active-objects-sizes'). +-define(SNMP_STORAGE_ACTIVE_OBJS, 'storage-active-objects'). +-define(SNMP_STORAGE_TOTAL_SIZE, 'storage-total-objects-sizes'). +-define(SNMP_STORAGE_TOTAL_OBJS, 'storage-total-objects'). 
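A minimal sketch (not taken from the patch itself) of what each `{SnmpVariable, QueueId}` pair defined above boils down to at runtime; the helper name `set_one_gauge/2` is illustrative only, while `leo_mq_api:status/1`, `leo_misc:get_value/3`, and `snmp_generic:variable_set/2` are the calls this module already uses:

```erlang
%% Illustrative sketch -- the function name is hypothetical.
%% Publishes the message count of a single queue under a single
%% SNMP variable, mirroring one iteration of get_and_set_mq_value/1.
set_one_gauge(SnmpVar, QueueId) ->
    NumOfMsgs = case catch leo_mq_api:status(QueueId) of
                    {ok, Props} ->
                        %% number of queued messages reported by leo_mq
                        leo_misc:get_value(?MQ_CNS_PROP_NUM_OF_MSGS, Props, 0);
                    _ ->
                        0
                end,
    catch snmp_generic:variable_set(SnmpVar, NumOfMsgs),
    ok.

%% e.g. set_one_gauge(?SNMP_MQ_NUM_OF_REBALANCE, ?QUEUE_ID_REBALANCE)
```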
%%-------------------------------------------------------------------- @@ -63,55 +77,149 @@ start_link(Window) -> %% Callbacks %%-------------------------------------------------------------------- handle_notify() -> - %% set number of queues - Num_1 = case catch leo_mq_api:status(?QUEUE_ID_PER_OBJECT) of - {ok, Res_1} -> - leo_misc:get_value(?MQ_CNS_PROP_NUM_OF_MSGS, Res_1, 0); - _ -> 0 - end, - Num_2 = case catch leo_mq_api:status(?QUEUE_ID_SYNC_BY_VNODE_ID) of - {ok, Res_2} -> - leo_misc:get_value(?MQ_CNS_PROP_NUM_OF_MSGS, Res_2, 0); - _ -> 0 - end, - Num_3 = case catch leo_mq_api:status(?QUEUE_ID_REBALANCE) of - {ok, Res_3} -> - leo_misc:get_value(?MQ_CNS_PROP_NUM_OF_MSGS, Res_3, 0); - _ -> 0 - end, - - catch snmp_generic:variable_set(?SNMP_MSG_REPLICATE, Num_1), - catch snmp_generic:variable_set(?SNMP_MSG_SYNC_VNODE, Num_2), - catch snmp_generic:variable_set(?SNMP_MSG_REBALANCE, Num_3), - - %% set size of stored data - {ok, Ret4} = leo_object_storage_api:stats(), - {TSize2, ASize2, TObjs2, AObjs2} = - lists:foldl(fun(#storage_stats{total_sizes = TSize0, - active_sizes = ASize0, - total_num = TObjs0, - active_num = AObjs0}, - {TSize1, ASize1, TObjs1, AObjs1}) -> - {TSize0 + TSize1, - ASize0 + ASize1, - TObjs0 + TObjs1, - AObjs0 + AObjs1}; - (_, Acc) -> - Acc - end, {0,0,0,0}, Ret4), - - TObjs3 = check_number(TObjs2), - AObjs3 = check_number(AObjs2), - - catch snmp_generic:variable_set(?SNMP_MSG_TOTAL_SIZE, erlang:round(TSize2/1024/1024)), - catch snmp_generic:variable_set(?SNMP_MSG_ACTIVE_SIZE, erlang:round(ASize2/1024/1024)), - catch snmp_generic:variable_set(?SNMP_MSG_TOTAL_OBJS, TObjs3), - catch snmp_generic:variable_set(?SNMP_MSG_ACTIVE_OBJS, AObjs3), + %% Set the total number of each mq item + ok = get_and_set_mq_value( + [{?SNMP_MQ_NUM_OF_REPLICATION, ?QUEUE_ID_PER_OBJECT}, + {?SNMP_MQ_NUM_OF_SYNC_VNODE, ?QUEUE_ID_SYNC_BY_VNODE_ID}, + {?SNMP_MQ_NUM_OF_REBALANCE, ?QUEUE_ID_REBALANCE}, + {?SNMP_MQ_NUM_OF_RECOVERY_NODE, ?QUEUE_ID_RECOVERY_NODE}, + {?SNMP_MQ_NUM_OF_DEL_DIR, ?QUEUE_ID_DEL_DIR}, + {?SNMP_MQ_NUM_OF_ASYNC_DEL_DIR, ?QUEUE_ID_ASYNC_DELETION}, + {?SNMP_MQ_NUM_OF_REQ_DEL_DIR, ?QUEUE_ID_REQ_DEL_DIR}, + {?SNMP_MQ_MDCR_NUM_OF_COMP_METADATA, ?QUEUE_ID_COMP_META_WITH_DC}, + {?SNMP_MQ_MDCR_NUM_OF_REQ_SYNC_OBJ, ?QUEUE_ID_SYNC_OBJ_WITH_DC} + ]), + + %% Set the value of each data-compaction item + ok = get_and_set_compaction_value(), + + %% Set the value of each storage item - Data compaction releated items + ok = get_and_set_storage_value(), ok. + +%% @private +get_and_set_mq_value([]) -> + ok; +get_and_set_mq_value([{?SNMP_MQ_NUM_OF_DEL_DIR = Id, ?QUEUE_ID_DEL_DIR}|Rest]) -> + V = lists:foldl( + fun(QId, SoFar) -> + N = case catch leo_mq_api:status(QId) of + {ok, Ret} -> + leo_misc:get_value( + ?MQ_CNS_PROP_NUM_OF_MSGS, Ret, 0); + _ -> + 0 + end, + SoFar + N + end, 0, ?del_dir_queue_list()), + catch snmp_generic:variable_set(Id, V), + get_and_set_mq_value(Rest); +get_and_set_mq_value([{Id, QId}|Rest]) -> + V = case catch leo_mq_api:status(QId) of + {ok, Ret} -> + leo_misc:get_value(?MQ_CNS_PROP_NUM_OF_MSGS, Ret, 0); + _ -> + 0 + end, + catch snmp_generic:variable_set(Id, V), + get_and_set_mq_value(Rest). 
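The function that follows publishes the data-compaction gauges. As a small worked sketch (figures are made up), the `comp-num-of-out-of-targets` value is simply the number of targets that are neither pending nor ongoing, and `comp-state` is an integer mapped by `to_compaction_state_int/1` (idling = 0, running = 1, suspending = 2):

```erlang
%% Illustrative sketch -- the function name is hypothetical; the formula
%% matches the one used for ?SNMP_COMP_NUM_OF_OUT_OF_TARGETS below.
%% E.g. 10 total targets, 6 pending, 1 ongoing -> 10 - (6 + 1) = 3.
num_of_out_of_targets(NumOfTargets, NumOfPendingTargets, NumOfOngoingTargets) ->
    NumOfTargets - (NumOfPendingTargets + NumOfOngoingTargets).
```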
+ + +%% @private +get_and_set_compaction_value() -> + case leo_compact_fsm_controller:state() of + {ok, #compaction_stats{ + status = Status, + total_num_of_targets = NumOfTargets, + num_of_pending_targets = NumOfPendingTargets, + num_of_ongoing_targets = NumOfOnGoinfTargets}} -> + {LastStartDT_1, LastEndDT_1} = + case leo_object_storage_api:stats() of + {ok, []} -> + {0,0}; + {ok, RetL} -> + lists:foldl( + fun(#storage_stats{compaction_hist = Histories}, + {LastStartDT, LastEndDT}) -> + case (length(Histories) == 0) of + true -> + {LastStartDT, LastEndDT}; + false -> + #compaction_hist{start_datetime = S, + end_datetime = E} = hd(Histories), + {max(LastStartDT, S), max(LastEndDT, E)} + end; + (_, SoFar) -> + SoFar + end, {0,0}, RetL); + _ -> + {0,0} + end, + + [catch snmp_generic:variable_set(Id, V) || + {Id, V} <- [{?SNMP_COMP_STATE, to_compaction_state_int(Status)}, + {?SNMP_COMP_LAST_START_DATETIME, to_unixtime(LastStartDT_1)}, + {?SNMP_COMP_LAST_END_DATETIME, to_unixtime(LastEndDT_1)}, + {?SNMP_COMP_NUM_OF_PENDING_TARGETS, NumOfPendingTargets}, + {?SNMP_COMP_NUM_OF_ONGOING_TARGETS, NumOfOnGoinfTargets}, + {?SNMP_COMP_NUM_OF_OUT_OF_TARGETS, + (NumOfTargets - (NumOfPendingTargets + NumOfOnGoinfTargets))}]]; + _ -> + void + end, + ok. + + +%% @private +get_and_set_storage_value() -> + {ok, Stats} = leo_object_storage_api:stats(), + {TSizeRet, ASizeRet, TObjsRet, AObjsRet} = + lists:foldl( + fun(#storage_stats{ + total_sizes = TSize_1, + active_sizes = ASize_1, + total_num = TObjs_1, + active_num = AObjs_1}, {TSize, ASize, TObjs, AObjs}) -> + {TSize_1 + TSize, + ASize_1 + ASize, + TObjs_1 + TObjs, + AObjs_1 + AObjs}; + (_, Acc) -> + Acc + end, {0,0,0,0}, Stats), + + [catch snmp_generic:variable_set(Id, V) || + {Id, V} <- [{?SNMP_STORAGE_TOTAL_SIZE, erlang:round(TSizeRet / 1024 / 1024)}, + {?SNMP_STORAGE_ACTIVE_SIZE, erlang:round(ASizeRet / 1024 / 1024)}, + {?SNMP_STORAGE_TOTAL_OBJS, check_number(TObjsRet)}, + {?SNMP_STORAGE_ACTIVE_OBJS, check_number(AObjsRet)}]], + ok. + + +%% @private +to_compaction_state_int(?ST_IDLING) -> 0; +to_compaction_state_int(?ST_RUNNING) -> 1; +to_compaction_state_int(?ST_SUSPENDING) -> 2. + + +%% @private +to_unixtime(DateTime) -> + case leo_date:greg_seconds_to_unixtime(DateTime) of + UnixTime when UnixTime < 0 -> + 0; + UnixTime -> + UnixTime + end. + + %% @private -check_number(Value) -> - case (leo_math:power(2,32) =< Value) of - true -> 4294967296; - false -> Value +check_number(V) -> + case (leo_math:power(2,32) =< V) of + true -> + 4294967296; + false when V < 0 -> + 0; + false -> + V end. From 6cf586d0a93e528e0b714a8c8de6db4245f6253b Mon Sep 17 00:00:00 2001 From: Yosuke Hara Date: Mon, 26 Mar 2018 12:35:48 +0900 Subject: [PATCH 2/9] Mainly updates recover-related docs --- docs/admin/index_of_commands.md | 13 +++---- docs/admin/system_operations/data.md | 51 +++++++++++++++++----------- 2 files changed, 38 insertions(+), 26 deletions(-) diff --git a/docs/admin/index_of_commands.md b/docs/admin/index_of_commands.md index 64bf8323..2a6ed80e 100644 --- a/docs/admin/index_of_commands.md +++ b/docs/admin/index_of_commands.md @@ -17,7 +17,7 @@ | Command | Description | |---------------------------------------|-------------| | **General Commands:** | | -| status `[]` | Retrieve status of every node (default)
Retrieve status of a specified node | +| status `[]` | Retrieve status of every node *(default)*
Retrieve status of a specified node | | whereis `` | Retrieve an assigned object by a file path | | **Storage Operation:** | | | detach `` | Remove a storage node in a LeoFS' storage cluster
Current status: `running` OR `stop` | @@ -28,11 +28,12 @@ | mq-stats `` | See statuses of message queues used in a LeoStorage node | | mq-suspend `` `` | Suspend a process consuming a message queue
Only active message queues can be suspended
While suspending, no messages are consumed | | mq-resume `` `` | Resume a process consuming a message queue | -| **Recover Commands:** | | -| recover-file `` | Recover an inconsistent object specified by a file-path | -| recover-node `` | Recover all inconsistent objects in a specified node | -| recover-ring `` | Recover `RING`, a routing table of a specified node | -| recover-cluster `` | Recover all inconsistent objects in a specified cluster in case of using the multi datacenter replication | +| **Recover Commands:** | | +| recover-file `` | Recover an inconsistent object specified by the file-path of the local cluster | +| recover-disk `` | Recover all inconsistent objects on the specified disk in the specified node of the local cluster | +| recover-node `` | Recover all inconsistent objects in the specified node of the local cluster | +| recover-ring `` | Recover **RING, a routing table** of the specified node of the local cluster | +| recover-cluster `` | Recover **all inconsistent objects in the specified remote cluste**r *(NOT the local cluster)* in case of using **the multi datacenter replication** | | **Compaction Commands:** | | | compact-start `` `` [``] | Remove unnecessary objects from a specified node
`num-of-targets`: It controls the number of containers compacted in parallel
`num-of-compaction-procs`: It controls a number of procs to execute the data compaction in parallel| | compact-suspend `` | Suspend a data compaction processing | diff --git a/docs/admin/system_operations/data.md b/docs/admin/system_operations/data.md index 0655e023..f62daae8 100644 --- a/docs/admin/system_operations/data.md +++ b/docs/admin/system_operations/data.md @@ -10,7 +10,7 @@ A brief introduction how LeoFS organize data into actual OS files. ### Append-Only for Content -Once a PUT/DELETE remote procedure call (**RPC**) arrives on LeoStorage appends new blocks including the object information such as the key, data itself and also various associated metadata to the end of a file. +Once a PUT/DELETE remote procedure call, **RPC** arrives on LeoStorage appends new blocks including the object information such as the key, data itself and also various associated metadata to the end of a file. This *Append-Only-File* we call [AVS, Aria Vector Storage](/architecture/leo_storage/#data-structure) which is referenced when retrieving the data through *GET RPCs*. With its inherent nature of the *Append-Only-File*, a data-compaction process is needed to clean up the orphaned space in an AVS. @@ -25,7 +25,7 @@ After having succeeded in appending new blocks to an AVS, then leo_object_storag Some data can be stored into a **Queue** for processing later in the case - A PUT/DELETE operation failed -- A Multi DC Replication (**MDCR**) failed +- A Multi DC Replication, **MDCR** failed - `rebalance/recover-(file|node|cluster)` invoked through leofs-adm @@ -34,7 +34,7 @@ Some data can be stored into a **Queue** for processing later in the case Multiple AVS/KVS pairs can be placed on one node to enable LeoFS handling as much use cases and hardware requirements as possible. See [Concept and Architecture / LeoStorage's Architecture - Data Structure](/architecture/leo_storage/#data-structure). - **Container : AVS/KVS pair = 1 : N** - - Multiple AVS/KVS pairs can be stored under one OS directory (We call it **Container**). + - Multiple AVS/KVS pairs can be stored under one OS directory. It is called **Container**. - 'N' can be specified through [leo_storage.conf](https://github.com/leo-project/leofs/blob/master/apps/leo_storage/priv/leo_storage.conf). - How to choose optimal 'N' - As a data-compaction is executed per AVS/KVS pair, at least the size of a AVS/KVS pair is needed to run data-compaction so that the larger 'N', the less disk space LeoFS uses for data-compaction. @@ -78,11 +78,11 @@ Commands related to Compaction as well as Disk Usage. | Shell | Description | |--- |--- | |**Compaction Commands**|| -| `leofs-adm compact-start (all/) []` | Start Compaction (Transfer its state to **running**).

`num-of-targets`: How many AVS/KVS pairs are compacted.
`num-of-compaction-pro`: How many processes are run in parallel. | +| `leofs-adm compact-start (all/) []` | Start Compaction *(Transfer its state to **running**)*.

`num-of-targets`: How many AVS/KVS pairs are compacted.
`num-of-compaction-pro`: How many processes are run in parallel. | | `leofs-adm compact-suspend ` | Suspend Compaction *(Transfer its state to 'suspend' from running)*.| | `leofs-adm compact-resume ` | Resume Compaction *(Transfer its state to 'running' from suspend)*.| | `leofs-adm compact-status ` | See the Current Compaction Status.| -| `leofs-adm diagnose-start ` | Start Diagnose (Not actually doing Compaction but scanning all AVS/KVS pairs and reporting what objects/metadatas exist as a file).| +| `leofs-adm diagnose-start ` | Start Diagnose *(Not actually doing Compaction but scanning all AVS/KVS pairs and reporting what objects/metadatas exist as a file)*.| |**Disk Usage**|| | `leofs-adm du ` | See the Current Disk Usage.| | `leofs-adm du detail ` | See the Current Disk Usage in detail.| @@ -91,13 +91,16 @@ Commands related to Compaction as well as Disk Usage. #### compact-start ```bash +## Note: +## All AVS/KVS pairs on storage_0@127.0.0.1 +## will be compacted with 3 concurrent processes (default concurrency is 3) ## Example: -## All AVS/KVS pairs on storage_0@127.0.0.1 will be compacted with 3 concurrent processes -## (default concurrency is 3) $ leofs-adm compact-start storage_0@127.0.0.1 all OK -## 5 AVS/KVS pairs on storage_0@127.0.0.1 will be compacted with 2 concurrent processes +## Note: +## Five AVS/KVS pairs on storage_0@127.0.0.1 +## will be compacted with 2 concurrent processes $ leofs-adm compact-start storage_0@127.0.0.1 5 2 OK ``` @@ -205,13 +208,13 @@ The file is formatted as Tab Separated Values *(TSV)* except headers *(head thre | Column Number | Description | |--- |--- | |1|byte-wise Offset where the object is located in an AVS.| -|2|Address ID on RING (Distribute Hash Routing Table).| +|2|Address ID on RING *(Distribute Hash Routing Table)*.| |3|File Name.| |4|The Number of Children in a File.| |5|File Size in bytes.| |6|Timestamp in Unix Time.| |7|Timestamp in Local Time.| -|8|Flag (0/1) representing whether the object is removed. +|8|Flag *(0/1)* representing whether the object is removed. ## Recover Objects @@ -223,10 +226,11 @@ This section provides information about the recovery commands that can be used i | Shell | Description | |--- |--- | -|leofs-adm recover-file \|Recover an inconsistent object specified by the file-path.| -|leofs-adm recover-disk \ \|Recover all inconsistent objects on the specified disk in the specified storage-node. Note that this command can be used ONLY in case all LeoStorage have the same obj_containers configuration.| -|leofs-adm recover-node \|Recover all inconsistent objects in the specified storage-node.| -|leofs-adm recover-cluster \|Recover all inconsistent objects in the specified cluster-id.| +|`leofs-adm recover-file ` |Recover the inconsistent object specified by the file-path.| +|`leofs-adm recover-disk ` |Recover all inconsistent objects on the specified disk in the specified storage-node. 
**Note that this command can be used ONLY in case all LeoStorage have the same obj_containers configuration.**| +|`leofs-adm recover-ring ` |Recover **RING, a routing table** of the specified node of the local cluster | +|`leofs-adm recover-node ` |Recover all inconsistent objects in the specified storage-node.| +|`leofs-adm recover-cluster ` |Recover **all inconsistent objects in the specified remote cluster** *(NOT the local cluster)* in case of using **the multi datacenter replication**.| #### recover-file @@ -240,14 +244,18 @@ OK #### recover-disk ```bash +## Note: +## If you have the following configuration in leo_storage.conf +## obj_containers.path = [./avs1,./avs2] +## then the below command will recover files stored under ./avs1 ## Example: -## If you have the following configuration in leo_storage.conf -## obj_containers.path = [./avs1,./avs2] -## then the below command will recover files stored under ./avs1 $ leofs-adm recover-disk storage_0@127.0.0.1 1 OK -## If you want to recover files stored under ./avs2 then issue the below one. +## Note: +## If you want to recover files stored under ./avs2 +## then issue the below one. +## Example: $ leofs-adm recover-disk storage_0@127.0.0.1 2 OK ``` @@ -263,8 +271,11 @@ OK #### recover-cluster ```bash +## Note: +## If your LeoFS already uses the multi data center replication, +## you can execute this command. ## Example: -$ leofs-adm recover-cluster cluster-1 +$ leofs-adm recover-cluster remote-leofs OK ``` @@ -289,7 +300,7 @@ When/How to use recover commands. - Invoke `rebalance`. - Source/Destination Cluster Down - Invoke `recover-cluster` with a downed cluster. -- Source/Destination Cluster Down and delete operations on the other side got lost (compacted). +- Source/Destination Cluster Down and delete operations on the other side got lost *(compacted)*. - Set up the cluster from scratch - invoke `recover-cluster` with the new cluster - See also [issue#636](https://github.com/leo-project/leofs/issues/636) for more information. From f71c28475f7e4991e4a96fe73e8afde665430d84 Mon Sep 17 00:00:00 2001 From: Yosuke Hara Date: Mon, 26 Mar 2018 13:46:16 +0900 Subject: [PATCH 3/9] Fix the documentation of each configuration --- docs/admin/settings/leo_gateway.md | 166 ++++++++++++------------- docs/admin/settings/leo_manager.md | 94 +++++++------- docs/admin/settings/leo_storage.md | 192 ++++++++++++++--------------- 3 files changed, 228 insertions(+), 224 deletions(-) diff --git a/docs/admin/settings/leo_gateway.md b/docs/admin/settings/leo_gateway.md index af50e167..fc7132d2 100644 --- a/docs/admin/settings/leo_gateway.md +++ b/docs/admin/settings/leo_gateway.md @@ -16,96 +16,96 @@ If you want to customize settings like where to place `leo_gateway.conf`, what u | Item | Description | |----------------------------------------|------------------------------------------| | **LeoManager Nodes** | -| managers | Name of LeoManager nodes. This configuration is necessary for communicating with `LeoManager's master` and `LeoManager's slave`.

( Default: [manager\_0@127.0.0.1, manager\_1@127.0.0.1] )

| +| `managers` | Name of LeoManager nodes. This configuration is necessary for communicating with `LeoManager's master` and `LeoManager's slave`.

*( Default: [manager\_0@127.0.0.1, manager\_1@127.0.0.1] )*

| | **LeoGateway Basic** | -| protocol | Gateway Protocol - [s3/rest/embed/nfs]

( Default: s3 )

| -| **HTTP Related (S3/REST)** | -| http.port | Port number the Gateway uses for HTTP connections

( Default: 8080 )

| -| http.num_of_acceptors | Numbers of processes listening for connections

( Default: 128 )

| -| http.max_keepalive | Maximum number of requests allowed in a single keep-alive session

( Default: 4096 )

| -| http.layer_of_dirs | Maximum number of virtual directory levels

( Default: 12 )

| -| http.ssl_port | Port number the Gateway uses for HTTPS connections

( Default: 8443 )

| -| http.ssl_certfile | SSL Certificate file

( Default: ./etc/server_cert.pem )

| -| http.ssl_keyfile | SSL key file

( Default: ./etc/server_key.pem )

| -| http.headers_config_file | HTTP custom header configuration file

( Default: ./etc/http_custom_header.conf )

| -| http.timeout_for_header | HTTP timeout for reading header

( Default: 5000, Unit: `msec`)

| -| http.timeout_for_body | HTTP timeout for reading body

( Default: 15000, Unit: `msec`)

| -| **Bucket Related** | -| bucket_prop_sync_interval | Synchronization Interval of Bucket Properties

( Default: 300, Unit: `sec` )

| -| **NFS-related configurations** | -| nfs.mountd.port | Mountd’s port number

( Default: 22050 )

| -| nfs.mountd.acceptors | Mountd’s the number of acceptors

( Default: 128 )

| -| nfs.nfsd.port | NFSd’s port number

( Default: 2049 )

| -| nfs.nfsd.acceptors | NFSd’s the number of acceptors

( Default: 128 )

| +| `protocol` | Gateway Protocol - [s3/rest/embed/nfs]

*( Default: s3 )*

| +| **HTTP Related (S3/REST)** | +| `http.port` | Port number the Gateway uses for HTTP connections

*( Default: 8080 )*

| +| `http.num_of_acceptors` | Number of processes listening for connections

*( Default: 128 )*

| +| `http.max_keepalive` | Maximum number of requests allowed in a single keep-alive session

*( Default: 4096 )*

| +| `http.layer_of_dirs` | Maximum number of virtual directory levels

*( Default: 12 )*

| +| `http.ssl_port` | Port number the Gateway uses for HTTPS connections

*( Default: 8443 )*

| +| `http.ssl_certfile` | SSL Certificate file

*( Default: ./etc/server\_cert.pem )*

| +| `http.ssl_keyfile` | SSL key file

*( Default: ./etc/server\_key.pem )*

| +| `http.headers_config_file` | HTTP custom header configuration file

*( Default: ./etc/http\_custom\_header.conf )*

| +| `http.timeout_for_header` | HTTP timeout for reading header

*( Default: 5000, Unit: `msec`)*

| +| `http.timeout_for_body` | HTTP timeout for reading body

*( Default: 15000, Unit: `msec`)*

| +| **Bucket Related** | +| `bucket_prop_sync_interval` | Synchronization Interval of Bucket Properties

*( Default: 300, Unit: `sec` )*

| +| **NFS-related configurations** | +| `nfs.mountd.port` | Mountd’s port number

*( Default: 22050 )*

| +| `nfs.mountd.acceptors` | The number of Mountd's acceptors

*( Default: 128 )*

| +| `nfs.nfsd.port` | NFSd’s port number

*( Default: 2049 )*

| +| `nfs.nfsd.acceptors` | The number of NFSd's acceptors

*( Default: 128 )*

| | **Large object processing configuration** | -| large_object.max_chunked_objs | Maximum number of chunked objects

( Default: 1000 )

| -| large_object.chunked_obj_len | Length of a chunked object. This value must be >= `large_object.reading_chunked_obj_len`

( Default: 5242880, Unit: `byte` )

| -| large_object.threshold_of_chunk_len | Threshold when object is chunked

( Default: 5767168, Unit: `byte` )

| -| large_object.reading_chunked_obj_len | Read length of a chunked object. This value must be <= `large_object.chunked_obj_len`

( Default: 5242880, Unit: `byte` )

| -| **Cache configuration** | -| cache.http_cache | Enable HTTP-Cache mode, working like Varnish/Squid. Otherwise as Object Cache

( Default: false )

-| cache.cache_workers | Number of cache workers

( Default: 16 )

| -| cache.cache_ram_capacity | Memory Cache Capacity, divide across workers. This has to satisfy `(8 * 1024 * 1024) * cache.cache_workers >= cache.cache_ram_capacity`

( Default: 268435456, Unit: `byte` )

| -| cache.cache_disc_capacity | Disk Cache Capacity, divide across workers. This has to satisfy `(8 * 1024 * 1024) * cache.cache_workers >= cache.cache_disc_capacity`

( Default: 524288000, Unit: `byte` )

| -| cache.cache_disc_threshold_len | Threshold when object is stored in disk cache

( Default: 1048576, Unit: `byte` )

| -| cache.cache_disc_dir_data | Directory for disk cache data

( Default: ./cache/data )

| -| cache.cache_disc_dir_journal | Directory for disk cache journal

( Default: ./cache/journal )

| -| **HTTP-Cache related** | -| cache.cache_expire | Cache expiry time

( Default: 300, Unit: `sec`)

| -| cache.cache_max_content_len | Maximum length of cached object

( Default: 1048576, Unit: `byte` ) | -| cache.cachable_content_type | Object types to be cached | -| cache.cachable_path_pattern | Path pattern(s) to be cached (regular expression) | -| **Watchdog / REX** | -| watchdog.rex.is_enabled | Enables or disables the rex-watchdog which monitors the memory usage of *Erlangs RPC component*.

( Default: true )

| -| watchdog.rex.interval | An interval of executing the watchdog processing

( Default: 10, Unit: `sec` )

| -| **Watchdog / CPU** | -| watchdog.cpu.is_enabled | Enables or disables the CPU-watchdog which monitors both *CPU load average* and *CPU utilization*

( Default: false )

| -| watchdog.cpu.raised_error_times | Times of raising error to a client

( Default: 5 )

| -| watchdog.cpu.interval | An interval of executing the watchdog processing

( Default: 10, Unit: `sec` )

| -| watchdog.cpu.threshold_cpu_load_avg | Threshold of CPU load average

( Default: 5.0 )

| -| watchdog.cpu.threshold_cpu_util | Threshold of CPU utilization

( Default: 100 )

| +| `large_object.max_chunked_objs` | Maximum number of chunked objects

*( Default: 1000 )*

| +| `large_object.chunked_obj_len` | Length of a chunked object. This value must be >= `large_object.reading_chunked_obj_len`

*( Default: 5242880, Unit: `byte` )*

| +| `large_object.threshold_of_chunk_len` | Threshold when object is chunked

*( Default: 5767168, Unit: `byte` )*

| +| `large_object.reading_chunked_obj_len` | Read length of a chunked object. This value must be <= `large_object.chunked_obj_len`

*( Default: 5242880, Unit: `byte` )*

| +| **Cache configuration** | +| `cache.http_cache` | Enable HTTP-Cache mode, working like Varnish/Squid. Otherwise as Object Cache

*( Default: false )*

+| `cache.cache_workers` | Number of cache workers

*( Default: 16 )*

| +| `cache.cache_ram_capacity` | Memory Cache Capacity, divided across the workers. This has to satisfy `(8 * 1024 * 1024) * cache.cache_workers >= cache.cache_ram_capacity`

*( Default: 268435456, Unit: `byte` )*

| +| `cache.cache_disc_capacity` | Disk Cache Capacity, divided across the workers. This has to satisfy `(8 * 1024 * 1024) * cache.cache_workers >= cache.cache_disc_capacity`

*( Default: 524288000, Unit: `byte` )*

| +| `cache.cache_disc_threshold_len` | Threshold when object is stored in disk cache

*( Default: 1048576, Unit: `byte` )*

| +| `cache.cache_disc_dir_data` | Directory for disk cache data

*( Default: ./cache/data )*

| +| `cache.cache_disc_dir_journal` | Directory for disk cache journal

*( Default: ./cache/journal )*

| +| **HTTP-Cache related** | +| `cache.cache_expire` | Cache expiry time

*( Default: 300, Unit: `sec`)*

| +| `cache.cache_max_content_len` | Maximum length of cached object

*( Default: 1048576, Unit: `byte` )* | +| `cache.cachable_content_type` | Object types to be cached | +| `cache.cachable_path_pattern` | Path pattern(s) to be cached (regular expression) | +| **Watchdog / REX** | +| `watchdog.rex.is_enabled` | Enables or disables the rex-watchdog which monitors the memory usage of *Erlang's RPC component*.

*( Default: true )*

| +| `watchdog.rex.interval` | An interval of executing the watchdog processing

*( Default: 10, Unit: `sec` )*

| +| **Watchdog / CPU** | +| `watchdog.cpu.is_enabled` | Enables or disables the CPU-watchdog which monitors both *CPU load average* and *CPU utilization*

*( Default: false )*

| +| `watchdog.cpu.raised_error_times` | Times of raising error to a client

*( Default: 5 )*

| +| `watchdog.cpu.interval` | An interval of executing the watchdog processing

*( Default: 10, Unit: `sec` )*

| +| `watchdog.cpu.threshold_cpu_load_avg` | Threshold of CPU load average

*( Default: 5.0 )*

| +| `watchdog.cpu.threshold_cpu_util` | Threshold of CPU utilization

*( Default: 100 )*

| | **Watchdog / IO (Erlang VM Internal Traffic)** | -| watchdog.io.is_enabled | Enables or disables the IO-watchdog which monitors the *Erlang VM Internal Traffic*

( Default: false )

| -| watchdog.io.interval | Watchdog interval

( Default: 1, Unit: `sec` )

| -| watchdog.io.threshold_input_per_sec | Threshold input per second

( Default: 134217728, Unit: `byte` )

| -| watchdog.io.threshold_output_per_sec | Threshold output per second

( Default: 134217728, Unit: `byte` )

| -| **Timeout** | -| timeout.level_1 | Timeout when put object to LeoStorage *(~65536 bytes)*

( Default: 5000, Unit: `msec` ) | -| timeout.level_2 | Timeout when put object to LeoStorage *(~131071 bytes)*

( Default: 7000, Unit: `msec` ) | -| timeout.level_3 | Timeout when put object to LeoStorage *(~524287 bytes)*

( Default: 10000, Unit: `msec` ) | -| timeout.level_4 | Timeout when put object to LeoStorage *(~1048576 bytes)*

( Default: 20000, Unit: `msec` ) | -| timeout.level_5 | Timeout when put object to LeoStorage *(1048576~ bytes)*

( Default: 30000, Unit: `msec` ) | -| timeout.get | Timeout when get object from LeoStorage

( Default: 30000, Unit: `msec` ) | -| timeout.ls | Timeout when list object from LeoStorage

( Default: 30000, Unit: `msec` ) | +| `watchdog.io.is_enabled` | Enables or disables the IO-watchdog which monitors the *Erlang VM Internal Traffic*

*( Default: false )*

| +| `watchdog.io.interval` | Watchdog interval

*( Default: 1, Unit: `sec` )*

| +| `watchdog.io.threshold_input_per_sec` | Threshold input per second

*( Default: 134217728, Unit: `byte` )*

| +| `watchdog.io.threshold_output_per_sec` | Threshold output per second

*( Default: 134217728, Unit: `byte` )*

| +| **Timeout** | +| `timeout.level_1` | Timeout when put object to LeoStorage *(~65536 bytes)*

*( Default: 5000, Unit: `msec` )* | +| `timeout.level_2` | Timeout when put object to LeoStorage *(~131071 bytes)*

*( Default: 7000, Unit: `msec` )* | +| `timeout.level_3` | Timeout when put object to LeoStorage *(~524287 bytes)*

*( Default: 10000, Unit: `msec` )* | +| `timeout.level_4` | Timeout when put object to LeoStorage *(~1048576 bytes)*

*( Default: 20000, Unit: `msec` )* | +| `timeout.level_5` | Timeout when put object to LeoStorage *(1048576~ bytes)*

*( Default: 30000, Unit: `msec` )* | +| `timeout.get` | Timeout when get object from LeoStorage

*( Default: 30000, Unit: `msec` )* | +| `timeout.ls` | Timeout when list object from LeoStorage

*( Default: 30000, Unit: `msec` )* | | **Log** | -| log.log_level | Log level:

  • 0:debug
  • 1:info
  • 2:warn
  • 3:error

( Default: 1 )

| -| log.is_enable_access_log | Enables or disables the access-log feature

( Default: false )

| -| log.erlang | Destination of log file(s) of Erlang's log

( Default: ./log/erlang )

| -| log.app | Destination of log file(s) of LeoStorage

( Default: ./log/app )

| -| log.member_dir | Destination of log file(s) of members of storage-cluster

( Default: ./log/ring )

| -| log.ring_dir | Destination of log file(s) of RING

( Default: ./log/ring )

| -| **Other Directories Settings** | -| queue_dir | Directory of queue for monitoring "RING"

( Default: ./work/queue )

| -| snmp_agent | Directory of SNMP agent configuration

( Default: ./snmp/snmpa_gateway_0/LEO-GATEWAY )

| +| `log.log_level` | Log level:
  • 0:debug
  • 1:info
  • 2:warn
  • 3:error

*( Default: 1 )*

| +| `log.is_enable_access_log` | Enables or disables the access-log feature

*( Default: false )*

| +| `log.erlang` | Destination of log file(s) of Erlang's log

*( Default: ./log/erlang )*

| +| `log.app` | Destination of log file(s) of LeoGateway

*( Default: ./log/app )*

| +| `log.member_dir` | Destination of log file(s) of members of storage-cluster

*( Default: ./log/ring )*

| +| `log.ring_dir` | Destination of log file(s) of RING

*( Default: ./log/ring )*

| +| **Other Directories Settings** | +| `queue_dir` | Directory of queue for monitoring "RING"

*( Default: ./work/queue )*

| +| `snmp_agent` | Directory of SNMP agent configuration

*( Default: ./snmp/snmpa\_gateway\_0/LEO-GATEWAY )*

| ### Erlang VM's Related Configurations -| Item | Description | -|----------------------------------|-----------------------------------------| -| nodename | The format of the node name is `@`, which must be unique always in a LeoFS system

( Default: storage_0@127.0.0.1 )

| -| distributed_cookie | Sets the magic cookie of the node to `Cookie`.

- See also: Distributed Erlang

( Default: 401321b4 )

| -| erlang.kernel_poll | Kernel poll reduces LeoFS' CPU usage when it has hundreds (or more) network connections.

( Default: true )

| -| erlang.asyc_threads | The total number of Erlang aynch threads

( Default: 32 )

| -| erlang.max_ports | The max_ports sets the default value of maximum number of ports.

- See also: [Erlang erlang:open_port/2](http://erlang.org/doc/man/erlang.html)

( Default: 64000 )

| -| erlang.crash_dump | The output destination of an Erlang crash dump

( Default: ./log/erl_crash.dump )

| -| erlang.max_ets_tables | The maxinum number of Erlagn ETS tables

( Default: 256000 )

| -| erlang.smp | `-smp` enable and `-smp` start the Erlang runtime system with SMP support enabled.

( Default: enable )

| -| erlang.schedulers.compaction\_of\_load | Enables or disables scheduler compaction of load. If it's enabled, the Erlang VM will attempt to fully load as many scheduler threads as mush as possible.

( Default: true )

| -| erlang.schedulers.utilization\_balancing | Enables or disables scheduler utilization balancing of load. By default scheduler utilization balancing is disabled and instead scheduler compaction of load is enabled, which strives for a load distribution that causes as many scheduler threads as possible to be fully loaded (that is, not run out of work).

( Default: false )

| -| erlang.distribution\_buffer\_size | Sender-side network distribution buffer size *(unit: KB)*

( Default: 32768 )

| -| erlang.fullsweep\_after | Option fullsweep_after makes it possible to specify the maximum number of generational collections before forcing a fullsweep, even if there is room on the old heap. Setting the number to zero disables the general collection algorithm, that is, all live data is copied at every garbage collection.

( Default: 0 )

| -| erlang.secio | Enables or disables eager check I/O scheduling. The flag effects when schedulers will check for I/O operations possible to execute, and when such I/O operations will execute.

( Default: true )

| -| process_limit | The maxinum number of Erlang processes. Sets the maximum number of simultaneously existing processes for this system if a Number is passed as value. Valid range for Number is [1024-134217727]

( Default: 1048576 )

| +| Item | Description | +|------------------------------------|-----------------------------------------| +| `nodename` | The format of the node name is `@`, which must be unique always in a LeoFS system

*( Default: gateway\_0@127.0.0.1 )*

| +| `distributed_cookie` | Sets the magic cookie of the node to `Cookie`.

- See also: Distributed Erlang

*( Default: 401321b4 )*

| +| `erlang.kernel_poll` | Kernel poll reduces LeoFS' CPU usage when it has hundreds (or more) network connections.

*( Default: true )*

| +| `erlang.asyc_threads` | The total number of Erlang async threads

*( Default: 32 )*

| +| `erlang.max_ports` | The max\_ports sets the default value of maximum number of ports.

- See also: [Erlang erlang:open_port/2](http://erlang.org/doc/man/erlang.html)

*( Default: 64000 )*

| +| `erlang.crash_dump` | The output destination of an Erlang crash dump

*( Default: ./log/erl\_crash.dump )*

| +| `erlang.max_ets_tables` | The maximum number of Erlang ETS tables

*( Default: 256000 )*

| +| `erlang.smp` | `-smp` enable and `-smp` start the Erlang runtime system with SMP support enabled.

*( Default: enable )*

| +| `erlang.schedulers.compaction_of_load` | Enables or disables scheduler compaction of load. If it's enabled, the Erlang VM will attempt to fully load as many scheduler threads as possible.

*( Default: true )*

| +| `erlang.schedulers.utilization_balancing` | Enables or disables scheduler utilization balancing of load. By default scheduler utilization balancing is disabled and instead scheduler compaction of load is enabled, which strives for a load distribution that causes as many scheduler threads as possible to be fully loaded (that is, not run out of work).

*( Default: false )*

| +| `erlang.distribution_buffer_size` | Sender-side network distribution buffer size *(unit: KB)*

*( Default: 32768 )*

| +| `erlang.fullsweep_after` | Option fullsweep\_after makes it possible to specify the maximum number of generational collections before forcing a fullsweep, even if there is room on the old heap. Setting the number to zero disables the general collection algorithm, that is, all live data is copied at every garbage collection.

*( Default: 0 )*

| +| `erlang.secio` | Enables or disables eager check I/O scheduling. The flag affects when schedulers will check for I/O operations possible to execute, and when such I/O operations will execute.

*( Default: true )*

| +| `process_limit` | The maximum number of Erlang processes. Sets the maximum number of simultaneously existing processes for this system if a Number is passed as value. Valid range for Number is [1024-134217727]

*( Default: 1048576 )*

| ### Notes and Tips of the Configuration diff --git a/docs/admin/settings/leo_manager.md b/docs/admin/settings/leo_manager.md index 64101906..46146657 100644 --- a/docs/admin/settings/leo_manager.md +++ b/docs/admin/settings/leo_manager.md @@ -22,61 +22,65 @@ The default setting is to launch a LeoFS system on one node, whose setting canno | Item | Description | |----------------------------------|------------------------------------------| | **Basic** | -| manager.partner | The partner of manager's alias. This configuration is necessary for communicationg between `LeoManager's master` and `LeoManager's slave`.
( Default: manager_1@127.0.0.1 )| -| console.port.cui | The port number of LeoManager's console for text format
( Default: 10010 ) | -| console.port.json | The port number of LeoManager's console for JSON format
( Default: 10020 ) | -| console.acceptors.cui | The maximum number of acceptors of LeoManager's console for text format
( Default: 3 ) | -| console.acceptors.json | The maximum number of acceptors of LeoManager's console for JSON format
( Default:16 ) | +| `manager.partner` | The partner of manager's alias. This configuration is necessary for communicating between `LeoManager's master` and `LeoManager's slave`.
*( Default: manager_1@127.0.0.1 )*| +| `console.port.cui` | The port number of LeoManager's console for text format
*( Default: 10010 )* | +| `console.port.json` | The port number of LeoManager's console for JSON format
*( Default: 10020 )* | +| `console.acceptors.cui` | The maximum number of acceptors of LeoManager's console for text format
*( Default: 3 )* | +| `console.acceptors.json` | The maximum number of acceptors of LeoManager's console for JSON format
*( Default:16 )* | | **System** | -| system.dc_id | `Datacenter ID` is necessary for using the data center replication
( Default: dc_1 ) | -| system.cluster_id | `Cluster ID` is also necessary for using the data center replication
( Default: leofs_1 ) | +| `system.dc_id` | `Datacenter ID` is necessary for using the data center replication
*( Default: dc_1 )* | +| `system.cluster_id` | `Cluster ID` is also necessary for using the data center replication
*( Default: leofs_1 )* | | **[Consistency Level](cluster.md)** | -| consistency.num\_of\_replicas | `only LeoManager's master`
The total number of object copies
( Default: 1 ) | -| consistency.write | `only LeoManager's master`
The total number of object copies needed for a successful WRITE operation
( Default: 1 ) | -| consistency.read | `only LeoManager's master`
The total number of object copies needed for a successful READ operation
( Default: 1 ) | -| consistency.delete | `only LeoManager's master`
The total number of object copies needed for a successful DELETE operation
( Default: 1 ) | -| consistency.rack\_aware\_replicas | `only LeoManager's master`
The total number of object copies of rack-aware
( Default: 0 ) | -| **Multi Data Center Replication** | -| mdc_replication.max\_targets | `only LeoManager's master`
The maximum number of replication targets of clusters OR data centers
( Default: 2 ) | -| mdc\_replication.num\_of\_replicas\_a\_dc | `only LeoManager's master`
A remote cluster of a LeoFS system which receives this cluster's objects, and then replicates them, which adhere to a replication method of each object
( Default: 1 )| -| mdc\_replication.consistency.write | `only LeoManager's master` `[since 1.3.3]`
A number of replicas needed for a successful WRITE-operation
( Default: 1 ) | -| mdc\_replication.consistency.read | `only LeoManager's master` `[since 1.3.3]`
A number of replicas needed for a successful READ-operation
( Default: 1 ) | -| mdc\_replication.consistency.delete | `only LeoManager's master` `[since 1.3.3]`
A number of replicas needed for a successful DELETE-operation
( Default: 1 ) | +| `consistency.num_of_replicas` | `only LeoManager's master`
The total number of object copies
*( Default: 1 )* | +| `consistency.write` | `only LeoManager's master`
The total number of object copies needed for a successful WRITE operation
*( Default: 1 )* | +| `consistency.read` | `only LeoManager's master`
The total number of object copies needed for a successful READ operation
*( Default: 1 )* | +| `consistency.delete` | `only LeoManager's master`
The total number of object copies needed for a successful DELETE operation
*( Default: 1 )* | +| `consistency.rack_aware_replicas` | `only LeoManager's master`
The total number of object copies of rack-aware
*( Default: 0 )* | +| **Multi Data Center Replication** | +| `mdc_replication.max_targets` | `only LeoManager's master`
The maximum number of replication targets of clusters OR data centers
*( Default: 2 )* | +| `mdc_replication.num_of_replicas_a_dc` | `only LeoManager's master`
A remote cluster of a LeoFS system receives this cluster's objects and then replicates them according to each object's replication method
*( Default: 1 )*| +| `mdc_replication.consistency.write` | `only LeoManager's master` `[since 1.3.3]`
A number of replicas needed for a successful WRITE-operation
*( Default: 1 )* | +| `mdc_replication.consistency.read` | `only LeoManager's master` `[since 1.3.3]`
A number of replicas needed for a successful READ-operation
*( Default: 1 )* | +| `mdc_replication.consistency.delete` | `only LeoManager's master` `[since 1.3.3]`
A number of replicas needed for a successful DELETE-operation
*( Default: 1 )* | | **RPC for Multi Datacenter Replication** | -| rpc.server.acceptors | The total number of acceptor of the RPC server
( Default: 16 ) | -| rpc.server.listen\_port | The listening port of the RPC server
( Default: 13075 ) | -| rpc.server.listen\_timeout | The listening timeout
( Default: 5000 ) | -| rpc.client.connection\_pool\_size | A client is able to keep connections of a remote LeoFS up to the pool size
( Default: 16 ) | -| rpc.client.connection\_buffer\_size | A client is able to increase connections of a remote LeoFS up to the buffer size
( Default: 16 ) | +| `rpc.server.acceptors` | The total number of acceptors of the RPC server
*( Default: 16 )* | +| `rpc.server.listen_port` | The listening port of the RPC server
*( Default: 13075 )* | +| `rpc.server.listen_timeout` | The listening timeout
*( Default: 5000 )* | +| `rpc.client.connection_pool_size` | A client is able to keep connections of a remote LeoFS up to the pool size
*( Default: 16 )* | +| `rpc.client.connection_buffer_size` | A client is able to increase connections of a remote LeoFS up to the buffer size
*( Default: 16 )* | | **Mnesia** | -| mnesia.dir | The directory of the database file of Mnesia*(Erlang distributed DB)*
( Default: ./work/mnesia/127.0.0.1 ) | -| mnesia.dump\_log\_write\_threshold | The maximum number of writes allowed to the transaction log before a new dump of the log is performed. Default is 100 log writes.

- See also: Erlang Mnesia dump_log_write_threshold
( Default: 50000 ) | -| mnesia.dc\_dump\_limit | Mnesia's tables are dumped when *filesize(Log) > (filesize(Tab)/Dc_dump_limit)*. Lower values reduce CPU overhead but increase disk space and startup times. Default is 4.

- See also: Erlang Mnesia
( Default: 40 ) | +| `mnesia.dir` | The directory of the database file of Mnesia*(Erlang distributed DB)*
*( Default: ./work/mnesia/127.0.0.1 )* | +| `mnesia.dump_log_write_threshold` | The maximum number of writes allowed to the transaction log before a new dump of the log is performed. Default is 100 log writes.

- See also: Erlang Mnesia dump_log_write_threshold
*( Default: 50000 )* | +| `mnesia.dc_dump_limit` | Mnesia's tables are dumped when *filesize(Log) > (filesize(Tab)/Dc_dump_limit)*. Lower values reduce CPU overhead but increase disk space and startup times. Default is 4.

- See also: Erlang Mnesia
*( Default: 40 )* | | **Log** | -| log.log_level | LeoManager's logger controls outputting logs by the log level:
  • 1: Info
  • 2: Warn
  • 3: Error
( Default: 1 ) | -| log.erlang | The output destination of Erlang's logs
( Default: ./log/erlang ) | -| log.app | The output destination of LeoManager's logs
( Default: ./log/app ) | -| log.member_dir | The output destination of the member's dump file
( Default: ./log/ring ) | -| log.ring_dir | The output destination of the RING's dump file
( Default: ./log/ring ) | +| `log.log_level` | LeoManager's logger controls outputting logs by the log level:
  • 1: Info
  • 2: Warn
  • 3: Error
*( Default: 1 )* | +| `log.erlang` | The output destination of Erlang's logs
*( Default: ./log/erlang )* | +| `log.app` | The output destination of LeoManager's logs
*( Default: ./log/app )* | +| `log.member_dir` | The output destination of the member's dump file
*( Default: ./log/ring )* | +| `log.ring_dir` | The output destination of the RING's dump file
*( Default: ./log/ring )* | | **Other Directories** | -| queue_dir | The directory of the data file of LeoFS' MQ
( Default: ./work/queue ) | -| snmp_agent | The directory of the snmp agent file of LeoFS
( Default: ./snmp/snmpa_manager_0/LEO-MANAGER ) | +| `queue_dir` | The directory of the data file of LeoFS' MQ
*( Default: ./work/queue )* | +| `snmp_agent` | The directory of the snmp agent file of LeoFS
*( Default: ./snmp/snmpa\_manager\_0/LEO-MANAGER )* | ### Erlang VM's Related Configurations -| Item | Description | -|----------------------------------|------------------------------------------| -| nodename | The format of the node name is `@`, which must be unique always in a LeoFS system
( Default: manager_0@127.0.0.1 ) | -| distributed\_cookie | Sets the magic cookie of the node to `Cookie`.

- See also: Distributed Erlang
( Default: 401321b4 ) | -| erlang.kernel\_poll | Kernel poll reduces LeoFS' CPU usage when it has hundreds (or more) network connections
( Default: true ) | -| erlang.asyc\_threads | The total number of Erlang aynch threads for the async thread pool. *The asynchronous thread pool* are OS threads which are userd for I/O operations.
( Default: 32 ) | -| erlang.max\_ports | The max_ports sets the default value of maximum number of ports.

- See also: [Erlang erlang:open_port/2](http://erlang.org/doc/man/erlang.html)
( Default: 64000 ) | -| erlang.crash\_dump | The output destination of an Erlang crash dump
( Default: ./log/erl_crash.dump ) | -| erlang.max\_ets\_tables | The maxinum number of Erlagn ETS tables
( Default: 256000 ) | -| erlang.smp | `-smp` enable and `-smp` start the Erlang runtime system with SMP support enabled
( Default: enable ) | -| process\_limit | The maxinum number of Erlang processes. Sets the maximum number of simultaneously existing processes for this system if a Number is passed as value. Valid range for Number is [1024-134217727]
( Default: 1048576 )| - +| Item | Description | +|------------------------------------|-----------------------------------------| +| `nodename` | The format of the node name is `@`, which must be unique always in a LeoFS system

*( Default: manager\_0@127.0.0.1 )*

| +| `distributed_cookie` | Sets the magic cookie of the node to `Cookie`.

- See also: Distributed Erlang

*( Default: 401321b4 )*

| +| `erlang.kernel_poll` | Kernel poll reduces LeoFS' CPU usage when it has hundreds (or more) network connections.

*( Default: true )*

| +| `erlang.asyc_threads` | The total number of Erlang async threads

*( Default: 32 )*

| +| `erlang.max_ports` | The max\_ports sets the default value of maximum number of ports.

- See also: [Erlang erlang:open_port/2](http://erlang.org/doc/man/erlang.html)

*( Default: 64000 )*

| +| `erlang.crash_dump` | The output destination of an Erlang crash dump

*( Default: ./log/erl\_crash.dump )*

| +| `erlang.max_ets_tables` | The maximum number of Erlang ETS tables

*( Default: 256000 )*

| +| `erlang.smp` | `-smp` enable and `-smp` start the Erlang runtime system with SMP support enabled.

*( Default: enable )*

| +| `erlang.schedulers.compaction_of_load` | Enables or disables scheduler compaction of load. If it's enabled, the Erlang VM will attempt to fully load as many scheduler threads as possible.

*( Default: true )*

| +| `erlang.schedulers.utilization_balancing` | Enables or disables scheduler utilization balancing of load. By default scheduler utilization balancing is disabled and instead scheduler compaction of load is enabled, which strives for a load distribution that causes as many scheduler threads as possible to be fully loaded (that is, not run out of work).

*( Default: false )*

| +| `erlang.distribution_buffer_size` | Sender-side network distribution buffer size *(unit: KB)*

*( Default: 32768 )*

| +| `erlang.fullsweep_after` | Option fullsweep\_after makes it possible to specify the maximum number of generational collections before forcing a fullsweep, even if there is room on the old heap. Setting the number to zero disables the general collection algorithm, that is, all live data is copied at every garbage collection.

*( Default: 0 )*

| +| `erlang.secio` | Enables or disables eager check I/O scheduling. The flag affects when schedulers will check for I/O operations possible to execute, and when such I/O operations will execute.

*( Default: true )*

| +| `process_limit` | The maximum number of Erlang processes. Sets the maximum number of simultaneously existing processes for this system if a Number is passed as value. Valid range for Number is [1024-134217727]

*( Default: 1048576 )*

| ## Related Links diff --git a/docs/admin/settings/leo_storage.md b/docs/admin/settings/leo_storage.md index 234d6130..30c63d05 100644 --- a/docs/admin/settings/leo_storage.md +++ b/docs/admin/settings/leo_storage.md @@ -11,17 +11,17 @@ | Item | Irrevocable? | Description | |----------------------------------------|----------------|------------------------------------------| | **LeoStorage Basic** | -| obj_containers.path | Modifiable with condition | Able to change the directory of the container(s) but not able to add or remove the directory(s). You need to move the data files which are `/avs/object` and `/avs/metadata`, which adhere to this configuration. | -| obj_containers.num\_of\_containers | Yes | Not able to change the configuration because LeoStorage cannot retrieve objects or metadatas. | -| obj_containers.metadata\_storage | Yes | As above | -| num_of_vnodes` | Yes | As above | +| `obj_containers.path` | Modifiable with condition | Able to change the directory of the container(s) but not able to add or remove the directory(s). You need to move the data files which are `/avs/object` and `/avs/metadata`, which adhere to this configuration. | +| `obj_containers.num_of_containers` | Yes | Not able to change the configuration because LeoStorage cannot retrieve objects or metadatas. | +| `obj_containers.metadata_storage` | Yes | As above | +| `num_of_vnodes` | Yes | As above | | **MQ** | -| mq.backend\_db | Modifiable with condition | Lose all the MQ's data after changing | -| mq.num\_of\_mq\_procs | Modifiable with condition | As above | +| `mq.backend_db` | Modifiable with condition | Lose all the MQ's data after changing | +| `mq.num_of_mq_procs` | Modifiable with condition | As above | | **Replication and Recovery object(s)** | -| replication.rack_awareness.rack_id | Yes | Not able to change the configuration because LeoFS cannot retrieve objects or metadatas. | +| `replication.rack_awareness.rack_id` | Yes | Not able to change the configuration because LeoFS cannot retrieve objects or metadatas. | | **Other Directories Settings** | -| queue_dir | Modifiable with condition | Able to change the MQ's directory but you need to move the MQ's data, which adhere to this configuration. | +| `queue_dir` | Modifiable with condition | Able to change the MQ's directory but you need to move the MQ's data, which adhere to this configuration. | ### Other Configurations @@ -35,112 +35,112 @@ If you want to modify settings like where to place `leo_storage.conf`, what user | Item | Description | |----------------------------------------|------------------------------------------| | **LeoManager Nodes** | -| managers | Name of LeoManager nodes. This configuration is necessary for communicating with `LeoManager's master` and `LeoManager's slave`.

( Default: [manager\_0@127.0.0.1, manager\_1@127.0.0.1] )

| +| `managers` | Name of LeoManager nodes. This configuration is necessary for communicating with `LeoManager's master` and `LeoManager's slave`.

*( Default: [manager\_0@127.0.0.1, manager\_1@127.0.0.1] )*

| | **LeoStorage Basic** | -| obj\_containers.path | Directories of object-containers

( Default: [./avs] )

| -| obj\_containers.num\_of\_containers | A number of object-containers of each directory. As **backend_db.eleveldb.write_buf_size** \* **obj\_containers.num\_of\_containers** memory can be consumed in total, take both into account to meet with your memory footprint requirements on LeoStorage.

( Default: [8] )

| -| obj\_containers.sync\_mode | Mode of the data synchronization. There're three modes:
  • `none`: Not synchronization every time *(default)*
  • `periodic`: Periodic synchronization which depends on `obj_containers.sync_interval_in_ms`
  • `writethrough`: Ensures that any buffers kept by the OS are written to disk every time

( Default: none )

| -| obj\_containers.sync\_interval\_in\_ms | Interval in ms of the data synchronization

( Default: 1000, Unit: `msec` )

| -| obj\_containers.metadata\_storage | The metadata storage feature is pluggable which depends on bitcask and leveldb.

( Default: leveldb )

| -| num\_of\_vnodes | The total number of virtual-nodes of a LeoStorage node for generating the distributed hashtable (RING)

( Default: 168 )

| -| object_storage.is\_strict\_check | Enable strict check between checksum of a metadata and checksum of an object.

( Default: false )

| -| object_storage.threshold\_of\_slow\_processing |Threshold of slow processing

( Default: 1000, Unit: `msec` )

| -| seeking\_timeout\_per\_metadata | Timeout of seeking metadatas per a metadata

( Default: 10, Unit: `msec` )

| -| max\_num\_of\_procs | Maximum number of processes for both write and read operation

( Default: 3000 )

| -| num\_of\_obj\_storage\_read\_procs | Total number of obj-storage-read processes per object-container, AVS
  • Range: [1..100]

( Default: 3 )

| +| `obj_containers.path` | Directories of object-containers

*( Default: [./avs] )*

| +| `obj_containers.num_of_containers` | A number of object-containers of each directory. As **backend_db.eleveldb.write_buf_size** \* **obj\_containers.num\_of\_containers** memory can be consumed in total, take both into account to meet with your memory footprint requirements on LeoStorage.

*( Default: [8] )*

| +| `obj_containers.sync_mode` | Mode of the data synchronization. There are three modes:
  • `none`: No synchronization on every write *(default)*
  • `periodic`: Periodic synchronization which depends on `obj_containers.sync_interval_in_ms`
  • `writethrough`: Ensures that any buffers kept by the OS are written to disk every time

*( Default: none )*

| +| `obj_containers.sync_interval_in_ms` | Interval in ms of the data synchronization

*( Default: 1000, Unit: `msec` )*

| +| `obj_containers.metadata_storage` | The metadata storage feature is pluggable which depends on bitcask and leveldb.

*( Default: leveldb )*

| +| `num_of_vnodes` | The total number of virtual-nodes of a LeoStorage node for generating the distributed hashtable, RING

*( Default: 168 )*

| +| `object_storage.is_strict_check` | Enable strict check between checksum of a metadata and checksum of an object.

*( Default: false )*

| +| `object_storage.threshold_of_slow_processing` | Threshold of slow processing

*( Default: 1000, Unit: `msec` )*

| +| `seeking_timeout_per_metadata` | Timeout for seeking metadata, per metadata entry

*( Default: 10, Unit: `msec` )*

| +| `max_num_of_procs` | Maximum number of processes for both write and read operation

*( Default: 3000 )*

| +| `num_of_obj_storage_read_procs` | Total number of obj-storage-read processes per object-container, AVS
  • Range: [1..100]

*( Default: 3 )*

| | **Watchdog** | -| watchdog.common.loosen_control_at_safe_count | When reach a number of safe *(clear watchdog)*, a watchdog loosen the control

( Default: 1 )

| +| `watchdog.common.loosen_control_at_safe_count` | When the safe count *(clear watchdog)* is reached, the watchdog loosens its control

*( Default: 1 )*

| | **Watchdog / REX** | -| watchdog.rex.is_enabled | Enables or disables the rex-watchdog which monitors the memory usage of *Erlang's RPC component*.

( Default: true )

| -| watchdog.rex.interval | An interval of executing the watchdog processing

( Default: 10, Unit: `sec` )

| -| watchdog.rex.threshold_mem_capacity | Threshold of memory capacity of binary for Erlang rex

( Default: 33554432, Unit: `byte` )

| +| `watchdog.rex.is_enabled` | Enables or disables the rex-watchdog which monitors the memory usage of *Erlang's RPC component*.

*( Default: true )*

| +| `watchdog.rex.interval` | An interval of executing the watchdog processing

*( Default: 10, Unit: `sec` )*

| +| `watchdog.rex.threshold_mem_capacity` | Threshold of memory capacity of binary for Erlang rex

*( Default: 33554432, Unit: `byte` )*

| | **Watchdog / CPU** | -| watchdog.cpu.is_enabled | Enables or disables the CPU-watchdog which monitors both *CPU load average* and *CPU utilization*

( Default: false )

| -| watchdog.cpu.raised_error_times | Times of raising error to a client

( Default: 5 )

| -| watchdog.cpu.interval | An interval of executing the watchdog processing

( Default: 10, Unit: `sec` )

| -| watchdog.cpu.threshold_cpu_load_avg | Threshold of CPU load average

( Default: 5.0 )

| -| watchdog.cpu.threshold_cpu_util | Threshold of CPU utilization

( Default: 100 )

| +| `watchdog.cpu.is_enabled` | Enables or disables the CPU-watchdog which monitors both *CPU load average* and *CPU utilization*

*( Default: false )*

| +| `watchdog.cpu.raised_error_times` | Times of raising error to a client

*( Default: 5 )*

| +| `watchdog.cpu.interval` | An interval of executing the watchdog processing

*( Default: 10, Unit: `sec` )*

| +| `watchdog.cpu.threshold_cpu_load_avg` | Threshold of CPU load average

*( Default: 5.0 )*

| +| `watchdog.cpu.threshold_cpu_util` | Threshold of CPU utilization

*( Default: 100 )*

| | **Watchdog / DISK** | -| watchdog.disk.is_enabled | Enables or disables the

( Default: false )

| -| watchdog.disk.raised_error_times | Times of raising error to a client

( Default: 5 )

| -| watchdog.disk.interval | An interval of executing the watchdog processing

( Default: 10, Unit: `sec` )

| -| watchdog.disk.threshold_disk_use | Threshold of Disk use(%) of a target disk's capacity

( Default: 85, Unit: `percent` )

| -| watchdog.disk.threshold_disk_util | Threshold of Disk utilization

( Default: 90, Unit: `percent` )

| -| watchdog.disk.threshold_disk_rkb | Threshold of disk read KB/sec

( Default: 98304, Unit: `KB` )

| -| watchdog.disk.threshold_disk_wkb | Threshold of disk write KB/sec

( Default: 98304, Unit: `KB` )

| -| watchdog.disk.target_devices | Target devices for checking disk utilization

( Default: [] )

| +| `watchdog.disk.is_enabled` | Enables or disables the disk-watchdog

*( Default: false )*

| +| `watchdog.disk.raised_error_times` | Times of raising error to a client

*( Default: 5 )*

| +| `watchdog.disk.interval` | An interval of executing the watchdog processing

*( Default: 10, Unit: `sec` )*

| +| `watchdog.disk.threshold_disk_use` | Threshold of Disk use(%) of a target disk's capacity

*( Default: 85, Unit: `percent` )*

| +| `watchdog.disk.threshold_disk_util` | Threshold of Disk utilization

*( Default: 90, Unit: `percent` )*

| +| `watchdog.disk.threshold_disk_rkb` | Threshold of disk read KB/sec

*( Default: 98304, Unit: `KB` )*

| +| `watchdog.disk.threshold_disk_wkb` | Threshold of disk write KB/sec

*( Default: 98304, Unit: `KB` )*

| +| `watchdog.disk.target_devices` | Target devices for checking disk utilization

*( Default: [] )*

| | **Watchdog / CLUSTER** | -| watchdog.cluster.is_enabled | Enables or disables the

( Default: false )

| -| watchdog.cluster.interval | An interval of executing the watchdog processing

( Default: 10 )

| +| `watchdog.cluster.is_enabled` | Enables or disables the cluster-watchdog

*( Default: false )*

| +| `watchdog.cluster.interval` | An interval of executing the watchdog processing

*( Default: 10 )*

| | **Watchdog / ERRORS** | -| watchdog.error.is_enabled | Enables or disables the

( Default: false )

| -| watchdog.error.interval | An interval of executing the watchdog processing

( Default: 60 )

| -| watchdog.error.threshold_count | Total counts of raising error to a client

( Default: 100 )

| +| `watchdog.error.is_enabled` | Enables or disables the error-watchdog

*( Default: false )*

| +| `watchdog.error.interval` | An interval of executing the watchdog processing

*( Default: 60 )*

| +| `watchdog.error.threshold_count` | Total counts of raising error to a client

*( Default: 100 )*

| | **Data Compaction** | | **Data Compaction / Basic** | -| compaction.limit_num_of_compaction_procs | Limit of a number of procs to execute data-compaction in parallel

( Default: 4 )

| -| compaction.skip_prefetch_size | Perfetch size when skipping garbage

( Default: 512 )

| -| compaction.waiting_time_regular | Regular value of compaction-proc waiting time/batch-proc

( Default: 500, Unit: `msec` )

| -| compaction.waiting_time_max | Maximum value of compaction-proc waiting time/batch-proc

( Default: 3000, Unit: `msec` )

| -| compaction.batch_procs_regular | Total number of regular compaction batch processes

( Default: 1000 )

| -| compaction.batch_procs_max | Maximum number of compaction batch processes

( Default: 1500 )

| +| `compaction.limit_num_of_compaction_procs` | Limit of a number of procs to execute data-compaction in parallel

*( Default: 4 )*

| +| `compaction.skip_prefetch_size` | Prefetch size when skipping garbage

*( Default: 512 )*

| +| `compaction.waiting_time_regular` | Regular value of compaction-proc waiting time/batch-proc

*( Default: 500, Unit: `msec` )*

| +| `compaction.waiting_time_max` | Maximum value of compaction-proc waiting time/batch-proc

*( Default: 3000, Unit: `msec` )*

| +| `compaction.batch_procs_regular` | Total number of regular compaction batch processes

*( Default: 1000 )*

| +| `compaction.batch_procs_max` | Maximum number of compaction batch processes

*( Default: 1500 )*

| | **Data Compaction / Automated Data Compaction** | -| autonomic_op.compaction.is_enabled | Enables or disables the auto-compaction

( Default: false )

| -| autonomic_op.compaction.parallel_procs | Total number of parallel processes

( Default: 1 )

| -| autonomic_op.compaction.interval | An interval time of between auto-comcations

( Default: 3600, Unit: `sec` )

| -| autonomic_op.compaction.warn_active_size_ratio | Warning ratio of active size

( Default: 70, Unit: `percent` )

| -| autonomic_op.compaction.threshold_active_size_ratio | Threshold ratio of active size. LeoStorage start data-comaction after reaching it

( Default: 60, `percent` )

| -| **MQ** | -| mq.backend_db | The MQ storage feature is pluggable which depends on bitcask and leveldb.

( Default: leveldb )

| -| mq.num_of_mq_procs | A number of mq-server's processes

( Default: 8 )

| -| mq.num_of_batch_process_max | Maximum number of bach processes of message

( Default: 3000 )

| -| mq.num_of_batch_process_regular | Regular value of bach processes of message

( Default: 1600 )

| -| mq.interval_between_batch_procs_max | Maximum value of interval between batch-procs

( Default: 3000, Unit: `msec` )

| -| mq.interval_between_batch_procs_regular | Regular value of interval between batch-procs

( Default: 500, Unit: `msec` )

| -| **Backend DB / eleveldb** | -| backend_db.eleveldb.write_buf_size | Write Buffer Size. Larger values increase performance, especially during bulk loads.
Up to two write buffers may be held in memory at the same time, so you may wish to adjust this parameter to control memory usage.Also, a larger write buffer will result in a longer recovery time the next time the database is opened. As **backend_db.eleveldb.write_buf_size** \* **obj\_containers.num\_of\_containers** memory can be consumed in total, take both into account to meet with your memory footprint requirements on LeoStorage.

( Default: 62914560 )

| -| backend_db.eleveldb.max_open_files | Max Open Files. Number of open files that can be used by the DB. You may need to increase this if your database has a large working set *(budget one open file per 2MB of working set)*.

( Default: 1000 )

| -| backend_db.eleveldb.sst_block_size | The size of a data block is controlled by the SST block size. The size represents a threshold, not a fixed count. Whenever a newly created block reaches this uncompressed size, leveldb considers it full and writes the block with its metadata to disk. The number of keys contained in the block depends upon the size of the values and keys.

( Default: 4096 )

| -| **Replication and Recovery object(s)** | -| replication.rack_awareness.rack_id | *Rack-Id* for the rack-awareness replica placement feature | -| replication.recovery.size_of_stacked_objs | Size of stacked objects. Objects are stacked to send as a bulked object to remote nodes.

( Default: 5242880, Unit: `byte` )

| -| replication.recovery.stacking_timeout | Stacking timeout. A bulked object are sent to a remote node after reaching the timeout.

( Default: 1, Unit: `sec` )

| -| **Multi Data Center Replication / Basic** | -| mdc_replication.size_of_stacked_objs | Size of stacked objects. Objects are stacked to send as a bulked object to a remote cluster.

( Default: 33554432, Unit: `byte` )

| -| mdc_replication.stacking_timeout | Stacking timeout. A bulked object are sent to a remote cluster after reaching the timeout.

( Default: 30, Unit: `sec` )

| -| mdc_replication.req_timeout | Request timeout between clusters

( Default: 30000, Unit: `msec` )

| +| `autonomic_op.compaction.is_enabled` | Enables or disables the auto-compaction

*( Default: false )*

| +| `autonomic_op.compaction.parallel_procs` | Total number of parallel processes

*( Default: 1 )*

| +| `autonomic_op.compaction.interval` | An interval time between auto-compactions

*( Default: 3600, Unit: `sec` )*

| +| `autonomic_op.compaction.warn_active_size_ratio` | Warning ratio of active size

*( Default: 70, Unit: `percent` )*

| +| `autonomic_op.compaction.threshold_active_size_ratio` | Threshold ratio of active size. LeoStorage starts data-compaction after reaching it

*( Default: 60, Unit: `percent` )*

| +| **MQ** | +| `mq.backend_db` | The MQ storage feature is pluggable which depends on bitcask and leveldb.

*( Default: leveldb )*

| +| `mq.num_of_mq_procs` | A number of mq-server's processes

*( Default: 8 )*

| +| `mq.num_of_batch_process_max` | Maximum number of batch processes of messages

*( Default: 3000 )*

| +| `mq.num_of_batch_process_regular` | Regular value of batch processes of messages

*( Default: 1600 )*

| +| `mq.interval_between_batch_procs_max` | Maximum value of interval between batch-procs

*( Default: 3000, Unit: `msec` )*

| +| `mq.interval_between_batch_procs_regular` | Regular value of interval between batch-procs

*( Default: 500, Unit: `msec` )*

| +| **Backend DB / eleveldb** | +| `backend_db.eleveldb.write_buf_size` | Write Buffer Size. Larger values increase performance, especially during bulk loads.
Up to two write buffers may be held in memory at the same time, so you may wish to adjust this parameter to control memory usage. Also, a larger write buffer will result in a longer recovery time the next time the database is opened. As **backend_db.eleveldb.write_buf_size** \* **obj\_containers.num\_of\_containers** memory can be consumed in total, take both into account to meet with your memory footprint requirements on LeoStorage.

*( Default: 62914560 )*

| +| `backend_db.eleveldb.max_open_files` | Max Open Files. Number of open files that can be used by the DB. You may need to increase this if your database has a large working set *(budget one open file per 2MB of working set)*.

*( Default: 1000 )*

| +| `backend_db.eleveldb.sst_block_size` | The size of a data block is controlled by the SST block size. The size represents a threshold, not a fixed count. Whenever a newly created block reaches this uncompressed size, leveldb considers it full and writes the block with its metadata to disk. The number of keys contained in the block depends upon the size of the values and keys.

*( Default: 4096 )*

| +| **Replication and Recovery object(s)** | +| `replication.rack_awareness.rack_id` | *Rack-Id* for the rack-awareness replica placement feature | +| `replication.recovery.size_of_stacked_objs` | Size of stacked objects. Objects are stacked to send as a bulked object to remote nodes.

*( Default: 5242880, Unit: `byte` )*

| +| `replication.recovery.stacking_timeout` | Stacking timeout. A bulked object is sent to a remote node after reaching the timeout.

*( Default: 1, Unit: `sec` )*

| +| **Multi Data Center Replication / Basic** | +| `mdc_replication.size_of_stacked_objs` | Size of stacked objects. Objects are stacked to send as a bulked object to a remote cluster.

*( Default: 33554432, Unit: `byte` )*

| +| `mdc_replication.stacking_timeout` | Stacking timeout. A bulked object is sent to a remote cluster after reaching the timeout.

*( Default: 30, Unit: `sec` )*

| +| `mdc_replication.req_timeout` | Request timeout between clusters

*( Default: 30000, Unit: `msec` )*

| | **Log** | -| log.log_level | Log level:
  • 0:debug
  • 1:info
  • 2:warn
  • 3:error

( Default: 1 )

| -| log.is_enable_access_log | Enables or disables the access-log feature

( Default: false )

| -| log.access_log_level | Access log's level:
  • 0: only regular case
  • 1: includes error cases

( Default: 0 )

| -| log.erlang | Destination of log file(s) of Erlang's log

( Default: ./log/erlang )

| -| log.app | Destination of log file(s) of LeoStorage

( Default: ./log/app )

| -| log.member_dir | Destination of log file(s) of members of storage-cluster

( Default: ./log/ring )

| -| log.ring_dir | Destination of log file(s) of RING

( Default: ./log/ring )

| -| log.is_enable_diagnosis_log | Destination of data-diagnosis log(s)

( Default: true )

| +| `log.log_level` | Log level:
  • 0:debug
  • 1:info
  • 2:warn
  • 3:error

*( Default: 1 )*

| +| `log.is_enable_access_log` | Enables or disables the access-log feature

*( Default: false )*

| +| `log.access_log_level` | Access log's level:
  • 0: only regular case
  • 1: includes error cases

*( Default: 0 )*

| +| `log.erlang` | Destination of log file(s) of Erlang's log

*( Default: ./log/erlang )*

| +| `log.app` | Destination of log file(s) of LeoStorage

*( Default: ./log/app )*

| +| `log.member_dir` | Destination of log file(s) of members of storage-cluster

*( Default: ./log/ring )*

| +| `log.ring_dir` | Destination of log file(s) of RING

*( Default: ./log/ring )*

| +| `log.is_enable_diagnosis_log` | Enables or disables the data-diagnosis log

*( Default: true )*

| | **Other Directories Settings** | -| queue_dir | Directory of queue for monitoring "RING"

( Default: ./work/queue )

| -| snmp_agent | Directory of SNMP agent configuration

( Default: ./snmp/snmpa_storage_0/LEO-STORAGE )

| +| `queue_dir` | Directory of queue for monitoring "RING"

*( Default: ./work/queue )*

| +| `snmp_agent` | Directory of SNMP agent configuration

*( Default: ./snmp/snmpa\_storage\_0/LEO-STORAGE )*

| ### Erlang VM's Related Configurations | Item | Description | |----------------------------------|-----------------------------------------| -| nodename | The format of the node name is `@`, which must be unique always in a LeoFS system

( Default: storage_0@127.0.0.1 )

| -| distributed_cookie | Sets the magic cookie of the node to `Cookie`.

- See also: Distributed Erlang

( Default: 401321b4 )

| -| erlang.kernel_poll | Kernel poll reduces LeoFS' CPU usage when it has hundreds (or more) network connections.

( Default: true )

| -| erlang.asyc_threads | The total number of Erlang aynch threads

( Default: 32 )

| -| erlang.max_ports | The max_ports sets the default value of maximum number of ports.

- See also: [Erlang erlang:open_port/2](http://erlang.org/doc/man/erlang.html)

( Default: 64000 )

| -| erlang.crash_dump | The output destination of an Erlang crash dump

( Default: ./log/erl_crash.dump )

| -| erlang.max_ets_tables | The maxinum number of Erlagn ETS tables

( Default: 256000 )

| -| erlang.smp | `-smp` enable and `-smp` start the Erlang runtime system with SMP support enabled.

( Default: enable )

| -| erlang.schedulers.compaction\_of\_load | Enables or disables scheduler compaction of load. If it's enabled, the Erlang VM will attempt to fully load as many scheduler threads as mush as possible.

( Default: true )

| -| erlang.schedulers.utilization\_balancing | Enables or disables scheduler utilization balancing of load. By default scheduler utilization balancing is disabled and instead scheduler compaction of load is enabled, which strives for a load distribution that causes as many scheduler threads as possible to be fully loaded (that is, not run out of work).

( Default: false )

| -| erlang.distribution\_buffer\_size | Sender-side network distribution buffer size *(unit: KB)*

( Default: 32768 )

| -| erlang.fullsweep\_after | Option fullsweep_after makes it possible to specify the maximum number of generational collections before forcing a fullsweep, even if there is room on the old heap. Setting the number to zero disables the general collection algorithm, that is, all live data is copied at every garbage collection.

( Default: 0 )

| -| erlang.secio | Enables or disables eager check I/O scheduling. The flag effects when schedulers will check for I/O operations possible to execute, and when such I/O operations will execute.

( Default: true )

| -| process_limit | The maxinum number of Erlang processes. Sets the maximum number of simultaneously existing processes for this system if a Number is passed as value. Valid range for Number is [1024-134217727]

( Default: 1048576 )

| +| `nodename` | The format of the node name is `@`, which must be unique always in a LeoFS system

*( Default: storage\_0@127.0.0.1 )*

| +| `distributed_cookie` | Sets the magic cookie of the node to `Cookie`.

- See also: Distributed Erlang

*( Default: 401321b4 )*

| +| `erlang.kernel_poll` | Kernel poll reduces LeoFS' CPU usage when it has hundreds (or more) network connections.

*( Default: true )*

| +| `erlang.asyc_threads` | The total number of Erlang async threads

*( Default: 32 )*

| +| `erlang.max_ports` | The max\_ports sets the default value of maximum number of ports.

- See also: [Erlang erlang:open_port/2](http://erlang.org/doc/man/erlang.html)

*( Default: 64000 )*

| +| `erlang.crash_dump` | The output destination of an Erlang crash dump

*( Default: ./log/erl\_crash.dump )*

| +| `erlang.max_ets_tables` | The maximum number of Erlang ETS tables

*( Default: 256000 )*

| +| `erlang.smp` | `-smp` enable and `-smp` start the Erlang runtime system with SMP support enabled.

*( Default: enable )*

| +| `erlang.schedulers.compaction_of_load` | Enables or disables scheduler compaction of load. If it's enabled, the Erlang VM will attempt to fully load as many scheduler threads as possible.

*( Default: true )*

| +| `erlang.schedulers.utilization_balancing` | Enables or disables scheduler utilization balancing of load. By default scheduler utilization balancing is disabled and instead scheduler compaction of load is enabled, which strives for a load distribution that causes as many scheduler threads as possible to be fully loaded (that is, not run out of work).

*( Default: false )*

| +| `erlang.distribution_buffer_size` | Sender-side network distribution buffer size *(unit: KB)*

*( Default: 32768 )*

| +| `erlang.fullsweep_after` | Option fullsweep\_after makes it possible to specify the maximum number of generational collections before forcing a fullsweep, even if there is room on the old heap. Setting the number to zero disables the general collection algorithm, that is, all live data is copied at every garbage collection.

*( Default: 0 )*

| +| `erlang.secio` | Enables or disables eager check I/O scheduling. The flag affects when schedulers will check for I/O operations possible to execute, and when such I/O operations will execute.

*( Default: true )*

| +| `process_limit` | The maximum number of Erlang processes. Sets the maximum number of simultaneously existing processes for this system if a Number is passed as value. Valid range for Number is [1024-134217727]

*( Default: 1048576 )*

| ### Notes and Tips of the Configuration From ec94745484040cd0d3d3c33366563f48d2f4e136 Mon Sep 17 00:00:00 2001 From: Yosuke Hara Date: Mon, 26 Mar 2018 16:11:18 +0900 Subject: [PATCH 4/9] To publish LeoFS' monitor section, updates its related docs --- docs/admin/system_admin/monitoring.md | 265 +++++++++++++++++++++++++- mkdocs.yml | 2 +- 2 files changed, 265 insertions(+), 2 deletions(-) diff --git a/docs/admin/system_admin/monitoring.md b/docs/admin/system_admin/monitoring.md index 3261fa6d..0be42622 100644 --- a/docs/admin/system_admin/monitoring.md +++ b/docs/admin/system_admin/monitoring.md @@ -1 +1,264 @@ -# System Monitoring \ No newline at end of file +# System Monitoring +## SNMPA Setup + +Each node of LeoStorage, LeoGateway and LeoManager provides a built in SNMP agent which allows to connect external systems, such as Nagios and Zabbix. You can retrieve various statistics of your LeoFS. + +### LeoManager +#### SNMPA Properties + +| Item | Value / Range | +|------|---------------| +| Port | 4020..4021, 14020..14021 | +| Branch | 1.3.6.1.4.1.35450 | +| [snmpa\_manager\_0](https://github.com/leo-project/leofs/tree/master/apps/leo_manager/snmp/snmpa_manager_0) | Port: 4020 | +| [snmpa\_manager\_1](https://github.com/leo-project/leofs/tree/master/apps/leo_manager/snmp/snmpa_manager_1) | Port: 4021 | +| [snmpa\_manager\_2](https://github.com/leo-project/leofs/tree/master/apps/leo_manager/snmp/snmpa_manager_2) | Port: 14020 | +| [snmpa\_manager\_3](https://github.com/leo-project/leofs/tree/master/apps/leo_manager/snmp/snmpa_manager_3) | Port: 14021 | + +#### SNMPA Items of Erlang-VM + +| Branch Number | Description | +|---------------|-------------| +| 1 | Node name| +| **1 min average** | +| 2 | Total numeber of processes | +| 3 | Total memory usage | +| 4 | System memory usage | +| 5 | Processes memory usage | +| 6 | ETS memory usage | +| **5 min average** | +| 7 | Total numeber of processes | +| 8 | Total memory usage | +| 9 | System memory usage | +| 10 | Processes memory usage | +| 11 | ETS memory usage | +| **Allocated memmory** | +| 12 | Used/allocated memory for 1 min | +| 13 | Allocated memory for 1 min | +| 14 | Used/allocated memory for 5 min | +| 15 | Allocated memory for 5 min | + +#### Check the configuration with `snmpwalk` command after starting LeoFS + +```bash +$ snmpwalk -v 2c -c public 127.0.0.1:4020 .1.3.6.1.4.1.35450 +SNMPv2-SMI::enterprises.35450.15.1.0 = STRING: "manager_0@127.0.0.1" +SNMPv2-SMI::enterprises.35450.15.2.0 = Gauge32: 123 +SNMPv2-SMI::enterprises.35450.15.3.0 = Gauge32: 30289989 +SNMPv2-SMI::enterprises.35450.15.4.0 = Gauge32: 24256857 +SNMPv2-SMI::enterprises.35450.15.5.0 = Gauge32: 6033132 +SNMPv2-SMI::enterprises.35450.15.6.0 = Gauge32: 1914017 +SNMPv2-SMI::enterprises.35450.15.7.0 = Gauge32: 123 +SNMPv2-SMI::enterprises.35450.15.8.0 = Gauge32: 30309552 +SNMPv2-SMI::enterprises.35450.15.9.0 = Gauge32: 24278377 +SNMPv2-SMI::enterprises.35450.15.10.0 = Gauge32: 6031175 +SNMPv2-SMI::enterprises.35450.15.11.0 = Gauge32: 1935758 +SNMPv2-SMI::enterprises.35450.15.12.0 = Gauge32: 75 +SNMPv2-SMI::enterprises.35450.15.13.0 = Gauge32: 84635402 +SNMPv2-SMI::enterprises.35450.15.14.0 = Gauge32: 78 +SNMPv2-SMI::enterprises.35450.15.15.0 = Gauge32: 88735915 +``` + +### LeoStorage +#### SNMPA Properties + +| Item | Value / Range | +|------|---------------| +| Port | 4010..4014 | +| Branch | 1.3.6.1.4.1.35450 | +| [snmpa\_storage\_0](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_0) 
| Port: 4020 | +| [snmpa\_storage\_1](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_1) | Port: 4021 | +| [snmpa\_storage\_2](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_2) | Port: 14020 | +| [snmpa\_storage\_3](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_3) | Port: 14021 | + +#### SNMPA Items of Erlang-VM + +| Branch Number | Description | +|---------------|-------------| +| 1 | Node name| +| **1 min average** | +| 2 | Total numeber of processes | +| 3 | Total memory usage | +| 4 | System memory usage | +| 5 | Processes memory usage | +| 6 | ETS memory usage | +| **5 min average** | +| 7 | Total numeber of processes | +| 8 | Total memory usage | +| 9 | System memory usage | +| 10 | Processes memory usage | +| 11 | ETS memory usage | +| **Allocated memmory** | +| 31 | Used/allocated memory for 1 min | +| 32 | Allocated memory for 1 min | +| 33 | Used/allocated memory for 5 min | +| 34 | Allocated memory for 5 min | + +#### SNMPA Items of LeoStorage + +| Branch Number | Description | +|---------------|-------------| +| **Request counter for 1 min** | +| 12 | Total number of WRITE requests | +| 13 | Total number of READ requests | +| 14 | Total number of DELETE requests | +| **Request counter for 5 min** | +| 15 | Total number of WRITE requests | +| 16 | Total number of READ requests | +| 17 | Total number of DELETE requests | +| **Stored objects related** | +| 18 | Total number of active objects | +| 19 | Total number of objects
*(It includes inactive objects which are removed and updated objects)* |
+| 20 | Total active object size |
+| 21 | Total object size |
+| **MQ related** |
+| 22 | Total messages of `object replication` |
+| 23 | Total messages of `sync-vnodes` |
+| 24 | Total messages of `rebalance` |
+| 41 | Total messages of `recovery-node` *(since `v1.4.0`)* |
+| 42 | Total messages of `deletion-directory` *(since `v1.4.0`)* |
+| 43 | Total messages of `async deletion-directories` *(since `v1.4.0`)* |
+| 44 | Total messages of a request of `deletion-directory` *(since `v1.4.0`)* |
+| 45 | Total messages of `comparison-metadata` for the multi datacenter replication *(since `v1.4.0`)* |
+| 46 | Total messages of a request of `sync-object` for the multi datacenter replication *(since `v1.4.0`)* |
+| **Data-compaction related** |
+| 51 | data-compaction state
  • 0: `idling`
  • 1: `running`
  • 2: `suspending`
| +| 52 | Start date time of last data-compaction *(unixtime)* | +| 53 | End date time of last data-compaction *(unixtime)* | +| 54 | Total number of pending targets | +| 55 | Total number of ongoing targets | +| 56 | Total number of out of targets | + +#### Check the configuration with `snmpwalk` command after starting LeoFS + +``` +$ snmpwalk -v 2c -c public 127.0.0.1:4010 .1.3.6.1.4.1.35450 +iso.3.6.1.4.1.35450.56.1.0 = STRING: "storage_0@127.0.0.1" +iso.3.6.1.4.1.35450.56.2.0 = Gauge32: 577 +iso.3.6.1.4.1.35450.56.3.0 = Gauge32: 47509309 +iso.3.6.1.4.1.35450.56.4.0 = Gauge32: 27404799 +iso.3.6.1.4.1.35450.56.5.0 = Gauge32: 20096683 +iso.3.6.1.4.1.35450.56.6.0 = Gauge32: 5967268 +iso.3.6.1.4.1.35450.56.7.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.8.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.9.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.10.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.11.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.12.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.13.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.14.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.15.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.16.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.17.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.18.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.19.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.20.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.21.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.22.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.23.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.24.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.31.0 = Gauge32: 63 +iso.3.6.1.4.1.35450.56.32.0 = Gauge32: 73028949 +iso.3.6.1.4.1.35450.56.33.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.34.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.41.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.42.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.43.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.44.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.45.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.46.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.51.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.52.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.53.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.54.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.55.0 = Gauge32: 0 +iso.3.6.1.4.1.35450.56.56.0 = Gauge32: 0 +``` + +### LeoGateway +#### SNMPA Properties + +| Item | Value / Range | +|------|---------------| +| Port | 4000..4001 | +| Branch | 1.3.6.1.4.1.35450 | +| [snmpa\_gateway\_0](https://github.com/leo-project/leofs/tree/master/apps/leo_gateway/snmp/snmpa_gateway_0) | Port: 4000 | +| [snmpa\_gateway\_1](https://github.com/leo-project/leofs/tree/master/apps/leo_gateway/snmp/snmpa_gateway_1) | Port: 4001 | + +#### SNMPA Items of Erlang-VM + +| Branch Number | Description | +|---------------|-------------| +| 1 | Node name| +| **1 min average** | +| 2 | Total numeber of processes | +| 3 | Total memory usage | +| 4 | System memory usage | +| 5 | Processes memory usage | +| 6 | ETS memory usage | +| **5 min average** | +| 7 | Total numeber of processes | +| 8 | Total memory usage | +| 9 | System memory usage | +| 10 | Processes memory usage | +| 11 | ETS memory usage | +| **Allocated memmory** | +| 31 | Used/allocated memory for 1 min | +| 32 | Allocated memory for 1 min | +| 33 | Used/allocated memory for 5 min | +| 34 | Allocated memory for 5 min | + +#### SNMPA Items of LeoGateway + +| Branch Number | Description | +|---------------|-------------| +| **Request counter for 1 min** | +| 12 | Total number of WRITE requests | +| 13 | Total number of READ requests | +| 14 | Total number of DELETE requests | +| **Request counter for 5 min** | +| 15 | Total number of WRITE requests | +| 16 | Total 
number of READ requests | +| 17 | Total number of DELETE requests | +| **Cache related** | +| 18 | Total counts of cache-hit | +| 19 | Total counts of cache-miss | +| 20 | Total number of cached objects | +| 21 | Total cached object size | + +#### Check the configuration with `snmpwalk` command after starting LeoFS + +``` +$ snmpwalk -v 2c -c public 127.0.0.1:4000 .1.3.6.1.4.1.35450 +SNMPv2-SMI::enterprises.35450.34.1.0 = STRING: "gateway_0@127.0.0.1" +SNMPv2-SMI::enterprises.35450.34.2.0 = Gauge32: 279 +SNMPv2-SMI::enterprises.35450.34.3.0 = Gauge32: 45266128 +SNMPv2-SMI::enterprises.35450.34.4.0 = Gauge32: 36653905 +SNMPv2-SMI::enterprises.35450.34.5.0 = Gauge32: 8612223 +SNMPv2-SMI::enterprises.35450.34.6.0 = Gauge32: 2276519 +SNMPv2-SMI::enterprises.35450.34.7.0 = Gauge32: 279 +SNMPv2-SMI::enterprises.35450.34.8.0 = Gauge32: 45157433 +SNMPv2-SMI::enterprises.35450.34.9.0 = Gauge32: 36385227 +SNMPv2-SMI::enterprises.35450.34.10.0 = Gauge32: 8772210 +SNMPv2-SMI::enterprises.35450.34.11.0 = Gauge32: 2261105 +SNMPv2-SMI::enterprises.35450.34.12.0 = Gauge32: 0 +SNMPv2-SMI::enterprises.35450.34.13.0 = Gauge32: 13 +SNMPv2-SMI::enterprises.35450.34.14.0 = Gauge32: 0 +SNMPv2-SMI::enterprises.35450.34.15.0 = Gauge32: 3 +SNMPv2-SMI::enterprises.35450.34.16.0 = Gauge32: 24 +SNMPv2-SMI::enterprises.35450.34.17.0 = Gauge32: 0 +SNMPv2-SMI::enterprises.35450.34.18.0 = Gauge32: 21 +SNMPv2-SMI::enterprises.35450.34.19.0 = Gauge32: 39 +SNMPv2-SMI::enterprises.35450.34.20.0 = Gauge32: 3 +SNMPv2-SMI::enterprises.35450.34.21.0 = Gauge32: 565700 +SNMPv2-SMI::enterprises.35450.34.31.0 = Gauge32: 75 +SNMPv2-SMI::enterprises.35450.34.32.0 = Gauge32: 84635402 +SNMPv2-SMI::enterprises.35450.34.33.0 = Gauge32: 78 +SNMPv2-SMI::enterprises.35450.34.34.0 = Gauge32: 88735915 +``` + +## Related Links + +- [Administrators / Settings / LeoManager Settings](../settings/leo_manager.md) +- [Administrators / Settings / LeoStorage Settings](../settings/leo_storage.md) +- [Administrators / Settings / LeoGateway Settings](../settings/leo_gateway.md) \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 3ecfb6f0..b82a0e36 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -74,7 +74,7 @@ pages: - Log Management: 'admin/system_admin/log_management.md' - Persistent Configuration: 'admin/system_admin/persistent_configuration.md' ## - LeoManager's Maintenance: 'admin/system_admin/leo_manager.md' - ## - System Monitoring: 'admin/system_admin/monitoring.md' + - System Monitoring: 'admin/system_admin/monitoring.md' - System Migration: 'admin/system_admin/migration.md' ## - Backup and Restore: 'admin/system_admin/backup_and_restore.md' - System Integration: 'admin/system_admin/integration.md' From d9ab713aa2a8f470e1af445a2f4649b7dde55cad Mon Sep 17 00:00:00 2001 From: Yosuke Hara Date: Mon, 26 Mar 2018 12:03:47 +0000 Subject: [PATCH 5/9] To mutually link between each setting page and monitoring page --- docs/admin/settings/leo_gateway.md | 13 +++++++------ docs/admin/settings/leo_manager.md | 1 + docs/admin/settings/leo_storage.md | 9 +++++---- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/docs/admin/settings/leo_gateway.md b/docs/admin/settings/leo_gateway.md index fc7132d2..2d084b37 100644 --- a/docs/admin/settings/leo_gateway.md +++ b/docs/admin/settings/leo_gateway.md @@ -150,12 +150,13 @@ How LeoGateway's cache feature works with cache related configurations is descri ## Related Links -- [Concept and Architecture / LeoGateway's Architecture](../../architecture/leo_gateway.md) -- [For Administrators / 
Interface / S3-API](../protocols/s3.md) -- [For Administrators / Interface / REST-API](../protocols/rest.md) -- [For Administrators / Interface / NFS v3](../protocols/nfs_v3.md) -- [For Administrators / System Operations / S3-API related Operations](../system_operations/s3.md) -- [For Administrators / Settings / Environment Configuration](/admin/settings/environment_config.md) +* [Concept and Architecture / LeoGateway's Architecture](/architecture/leo_gateway.md) +* [For Administrators / Interface / S3-API](/admin/protocols/s3.md) +* [For Administrators / Interface / REST-API](/admin/protocols/rest.md) +* [For Administrators / Interface / NFS v3](/admin/protocols/nfs_v3.md) +* [For Administrators / System Administration / System Monitoring](/admin/system_admin/monitoring.md) +* [For Administrators / System Operations / S3-API related Operations](/admin/system_operations/s3.md) +* [For Administrators / Settings / Environment Configuration](/admin/settings/environment_config.md) [^1]: Amazon S3 API [^2]: Wikipedia: Network File System diff --git a/docs/admin/settings/leo_manager.md b/docs/admin/settings/leo_manager.md index 46146657..91de6495 100644 --- a/docs/admin/settings/leo_manager.md +++ b/docs/admin/settings/leo_manager.md @@ -87,4 +87,5 @@ The default setting is to launch a LeoFS system on one node, whose setting canno * [Concept and Architecture / LeoManager's Architecture](/architecture/leo_manager.md) * [For Administrators / Settings / Cluster Settings](cluster.md) * [For Administrators / Settings / Environment Configuration](/admin/settings/environment_config.md) +* [For Administrators / System Administration / System Monitoring](/admin/system_admin/monitoring.md) * [For Administrators / System Operations / Multi Data Center Replication](/admin/system_operations/multi_dc_replication.md) diff --git a/docs/admin/settings/leo_storage.md b/docs/admin/settings/leo_storage.md index 30c63d05..42163ddd 100644 --- a/docs/admin/settings/leo_storage.md +++ b/docs/admin/settings/leo_storage.md @@ -219,10 +219,11 @@ When the each value reached the min value, the auto-compaction changes the statu ## Related Links -- [Concept and Architecture / LeoStorage's Architecture](../../architecture/leo_storage.md) -- [For Administrators / System Operations / Cluster Operations](/admin/system_operations/cluster.md) -- [For Administrators / System Operations / Data Operations](/admin/system_operations/data.md) -- [For Administrators / Settings / Environment Configuration](/admin/settings/environment_config.md) +* [Concept and Architecture / LeoStorage's Architecture](/architecture/leo_storage.md) +* [For Administrators / System Administration / System Monitoring](/admin/system_admin/monitoring.md) +* [For Administrators / System Operations / Cluster Operations](/admin/system_operations/cluster.md) +* [For Administrators / System Operations / Data Operations](/admin/system_operations/data.md) +* [For Administrators / Settings / Environment Configuration](/admin/settings/environment_config.md) [^1]: ZFS [^2]: LeoFS' Issue #987, Measure rebalance/recover-node performance according to mq.num_of_mq_procs From 0b9ab2fede350ed67d9201e955e59e26bb08a7bb Mon Sep 17 00:00:00 2001 From: Yosuke Hara Date: Mon, 26 Mar 2018 21:38:50 +0900 Subject: [PATCH 6/9] Fix location of favicon --- docs/{favicon.ico => assets/images/favicon.png} | Bin mkdocs.yml | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) rename docs/{favicon.ico => assets/images/favicon.png} (100%) diff --git a/docs/favicon.ico 
b/docs/assets/images/favicon.png similarity index 100% rename from docs/favicon.ico rename to docs/assets/images/favicon.png diff --git a/mkdocs.yml b/mkdocs.yml index b82a0e36..509de7af 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -11,8 +11,8 @@ site_name: LeoFS Documentation repo_url: https://github.com/leo-project/leofs repo_name: 'GitHub' site_author: LeoProject -site_favicon: favicon.ico -copyright: (c) 2012-2017 LeoProject. All rights reserved. +## site_favicon: favicon.ico +copyright: (c) 2012-2018 LeoProject. All rights reserved. site_description: LeoProject makes LeoFS, which is an open source distributed object storage system and a highly available, distributed, eventually consistent storage system. LeoFS is supporting the following features:Multi-protocol Support - S3-API, REST-API and NFS v3, Large Object Support, Multi Data Center Replication and others. google_analytics: ['UA-5301881-3', 'leo-project.net'] From 77708e9d9db833c0849cade892aec727f3ee808a Mon Sep 17 00:00:00 2001 From: Yosuke Hara Date: Tue, 27 Mar 2018 11:51:11 +0900 Subject: [PATCH 7/9] [doc] Fix wrong port number of LeoStorage's SNMP server --- docs/admin/system_admin/monitoring.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/admin/system_admin/monitoring.md b/docs/admin/system_admin/monitoring.md index 0be42622..2215f087 100644 --- a/docs/admin/system_admin/monitoring.md +++ b/docs/admin/system_admin/monitoring.md @@ -66,10 +66,10 @@ SNMPv2-SMI::enterprises.35450.15.15.0 = Gauge32: 88735915 |------|---------------| | Port | 4010..4014 | | Branch | 1.3.6.1.4.1.35450 | -| [snmpa\_storage\_0](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_0) | Port: 4020 | -| [snmpa\_storage\_1](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_1) | Port: 4021 | -| [snmpa\_storage\_2](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_2) | Port: 14020 | -| [snmpa\_storage\_3](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_3) | Port: 14021 | +| [snmpa\_storage\_0](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_0) | Port: 4010 | +| [snmpa\_storage\_1](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_1) | Port: 4011 | +| [snmpa\_storage\_2](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_2) | Port: 4012 | +| [snmpa\_storage\_3](https://github.com/leo-project/leofs/tree/master/apps/leo_storage/snmp/snmpa_storage_3) | Port: 4013 | #### SNMPA Items of Erlang-VM From b6861940b1b730c014a928f27e949b1a532c74fc Mon Sep 17 00:00:00 2001 From: Yosuke Hara Date: Tue, 27 Mar 2018 11:52:13 +0900 Subject: [PATCH 8/9] [storage] To strictly check integer values --- .../src/leo_storage_statistics.erl | 26 ++++++++++++++----- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/apps/leo_storage/src/leo_storage_statistics.erl b/apps/leo_storage/src/leo_storage_statistics.erl index 31c10fb0..200a59aa 100644 --- a/apps/leo_storage/src/leo_storage_statistics.erl +++ b/apps/leo_storage/src/leo_storage_statistics.erl @@ -104,21 +104,31 @@ get_and_set_mq_value([]) -> get_and_set_mq_value([{?SNMP_MQ_NUM_OF_DEL_DIR = Id, ?QUEUE_ID_DEL_DIR}|Rest]) -> V = lists:foldl( fun(QId, SoFar) -> - N = case catch leo_mq_api:status(QId) of + RetN = case catch leo_mq_api:status(QId) of {ok, Ret} -> - 
leo_misc:get_value( - ?MQ_CNS_PROP_NUM_OF_MSGS, Ret, 0); + case leo_misc:get_value( + ?MQ_CNS_PROP_NUM_OF_MSGS, Ret, 0) of + N when is_number(N) -> + N; + _ -> + 0 + end; _ -> 0 end, - SoFar + N + SoFar + RetN end, 0, ?del_dir_queue_list()), catch snmp_generic:variable_set(Id, V), get_and_set_mq_value(Rest); get_and_set_mq_value([{Id, QId}|Rest]) -> V = case catch leo_mq_api:status(QId) of {ok, Ret} -> - leo_misc:get_value(?MQ_CNS_PROP_NUM_OF_MSGS, Ret, 0); + case leo_misc:get_value(?MQ_CNS_PROP_NUM_OF_MSGS, Ret, 0) of + N when is_integer(N) -> + N; + _ -> + 0 + end; _ -> 0 end, @@ -214,7 +224,7 @@ to_unixtime(DateTime) -> %% @private -check_number(V) -> +check_number(V) when is_number(V) -> case (leo_math:power(2,32) =< V) of true -> 4294967296; @@ -222,4 +232,6 @@ check_number(V) -> 0; false -> V - end. + end; +check_number(_) -> + 0. From 714371f1e0ab3d1e56bdd5de5d6a1605fb085b0a Mon Sep 17 00:00:00 2001 From: Yosuke Hara Date: Tue, 27 Mar 2018 14:46:55 +0900 Subject: [PATCH 9/9] [storage] To strictly check integer values(2) --- apps/leo_storage/src/leo_storage_statistics.erl | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/apps/leo_storage/src/leo_storage_statistics.erl b/apps/leo_storage/src/leo_storage_statistics.erl index 200a59aa..e0c3919c 100644 --- a/apps/leo_storage/src/leo_storage_statistics.erl +++ b/apps/leo_storage/src/leo_storage_statistics.erl @@ -118,7 +118,7 @@ get_and_set_mq_value([{?SNMP_MQ_NUM_OF_DEL_DIR = Id, ?QUEUE_ID_DEL_DIR}|Rest]) - end, SoFar + RetN end, 0, ?del_dir_queue_list()), - catch snmp_generic:variable_set(Id, V), + catch snmp_generic:variable_set(Id, check_number(V)), get_and_set_mq_value(Rest); get_and_set_mq_value([{Id, QId}|Rest]) -> V = case catch leo_mq_api:status(QId) of @@ -132,7 +132,7 @@ get_and_set_mq_value([{Id, QId}|Rest]) -> _ -> 0 end, - catch snmp_generic:variable_set(Id, V), + catch snmp_generic:variable_set(Id, check_number(V)), get_and_set_mq_value(Rest). @@ -225,9 +225,10 @@ to_unixtime(DateTime) -> %% @private check_number(V) when is_number(V) -> - case (leo_math:power(2,32) =< V) of + Max = leo_math:power(2, 32), + case (Max =< V) of true -> - 4294967296; + Max; false when V < 0 -> 0; false ->
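
For reviewers who want to sanity-check the stricter integer handling that PATCH 8 and PATCH 9 introduce, the sketch below mirrors the clamping logic in isolation. It is a minimal, hypothetical module, not the actual `leo_storage_statistics`: the module name is made up, and the bound is inlined as `4294967296` on the assumption that `leo_math:power(2, 32)` simply computes integer exponentiation before the result is handed to `snmp_generic:variable_set/2`.

```erlang
%% Hedged sketch of the value clamping added in the patches above.
%% Non-numeric terms (e.g. an error tuple from leo_mq_api:status/1)
%% and negative numbers become 0; anything at or above 2^32 is capped,
%% so the SNMP agent always receives a value in the expected range.
-module(gauge_clamp_sketch).
-export([check_number/1]).

check_number(V) when is_number(V) ->
    Max = 4294967296,            %% stands in for leo_math:power(2, 32)
    if
        V >= Max -> Max;         %% cap oversized counters
        V < 0    -> 0;           %% negative values are reported as zero
        true     -> V
    end;
check_number(_NotANumber) ->
    0.                           %% anything else (error tuples, atoms) becomes zero
```

In an Erlang shell, `gauge_clamp_sketch:check_number({error, timeout})` and `gauge_clamp_sketch:check_number(-5)` both return `0`, while `gauge_clamp_sketch:check_number(1 bsl 40)` returns `4294967296`, which is the behaviour the patched `get_and_set_mq_value/1` relies on when an MQ status call fails or returns an unexpected term.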