From 54c163abfb08b5c96e90116aa62103163e22f993 Mon Sep 17 00:00:00 2001 From: Hyeon Sung Date: Thu, 25 Apr 2024 00:12:06 +0900 Subject: [PATCH 1/7] [refactor] code simplification, structure changes and translation (#1827) Co-authored-by: tomsun28 --- .../collect/icmp/IcmpCollectImpl.java | 29 +++++++++---------- .../collector/collect/jmx/JmxCollectImpl.java | 17 +++++------ 2 files changed, 22 insertions(+), 24 deletions(-) diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImpl.java index 624e184ce2f..e8042679eec 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/icmp/IcmpCollectImpl.java @@ -41,14 +41,14 @@ public IcmpCollectImpl(){} @Override public void collect(CollectRep.MetricsData.Builder builder, long monitorId, String app, Metrics metrics) { long startTime = System.currentTimeMillis(); - // 简单校验必有参数 + // Simple validation requires mandatory parameters if (metrics == null || metrics.getIcmp() == null) { builder.setCode(CollectRep.Code.FAIL); builder.setMsg("ICMP collect must has icmp params"); return; } IcmpProtocol icmp = metrics.getIcmp(); - // 超时时间默认6000毫秒 + // The default timeout is 6000 milliseconds int timeout = 6000; try { timeout = Integer.parseInt(icmp.getTimeout()); @@ -56,26 +56,25 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri log.warn(e.getMessage()); } try { - // todo need root java jcm to use ICMP, else it telnet the peer server 7 port available - // todo 需要配置java虚拟机root权限从而使用ICMP,否则是判断telnet对端7号端口是否开通 + // todo requires Java JVM with root permissions to use ICMP, otherwise check if telnet is available on peer server's port 7 + // todo requires configuring Java JVM with root permissions to use ICMP, otherwise check if telnet is available on the peer's port 7 // todo https://stackoverflow.com/questions/11506321/how-to-ping-an-ip-address boolean status = InetAddress.getByName(icmp.getHost()).isReachable(timeout); long responseTime = System.currentTimeMillis() - startTime; - if (status) { - CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder(); - for (String alias : metrics.getAliasFields()) { - if (CollectorConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { - valueRowBuilder.addColumns(Long.toString(responseTime)); - } else { - valueRowBuilder.addColumns(CommonConstants.NULL_VALUE); - } - } - builder.addValues(valueRowBuilder.build()); - } else { + if (!status) { builder.setCode(CollectRep.Code.UN_REACHABLE); builder.setMsg("Un Reachable, Timeout " + timeout + "ms"); return; } + CollectRep.ValueRow.Builder valueRowBuilder = CollectRep.ValueRow.newBuilder(); + for (String alias : metrics.getAliasFields()) { + if (CollectorConstants.RESPONSE_TIME.equalsIgnoreCase(alias)) { + valueRowBuilder.addColumns(Long.toString(responseTime)); + } else { + valueRowBuilder.addColumns(CommonConstants.NULL_VALUE); + } + } + builder.addValues(valueRowBuilder.build()); } catch (UnknownHostException unknownHostException) { String errorMsg = CommonUtil.getMessageFromThrowable(unknownHostException); builder.setCode(CollectRep.Code.UN_REACHABLE); diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/jmx/JmxCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/jmx/JmxCollectImpl.java index e3ca9e3aef6..73c380030a8 100644 --- 
a/collector/src/main/java/org/apache/hertzbeat/collector/collect/jmx/JmxCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/jmx/JmxCollectImpl.java @@ -53,6 +53,7 @@ import org.apache.hertzbeat.common.entity.job.protocol.JmxProtocol; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.util.CommonUtil; +import org.springframework.util.Assert; import org.springframework.util.StringUtils; /** @@ -76,8 +77,8 @@ public JmxCollectImpl() { public void collect(CollectRep.MetricsData.Builder builder, long monitorId, String app, Metrics metrics) { try { - JmxProtocol jmxProtocol = metrics.getJmx(); validateParams(metrics); + JmxProtocol jmxProtocol = metrics.getJmx(); // Create a jndi remote connection JMXConnector jmxConnector = getConnectSession(jmxProtocol); @@ -161,14 +162,12 @@ private Map extractAttributeValue(AttributeList attributeList) { return attributeValueMap; } - private void validateParams(Metrics metrics) throws IllegalArgumentException { - if (metrics == null || metrics.getJmx() == null) { - throw new IllegalArgumentException("JMX collect must has jmx params"); - } - if (StringUtils.hasText(metrics.getJmx().getUrl())) { - if (metrics.getJmx().getUrl().contains(IGNORED_STUB)) { - throw new IllegalArgumentException("JMX url prohibit contains stub, please check"); - } + private void validateParams(Metrics metrics) { + Assert.isTrue(metrics != null && metrics.getJmx() != null, "JMX collect must have JMX params"); + + String url = metrics.getJmx().getUrl(); + if (StringUtils.hasText(url)) { + Assert.doesNotContain(url, IGNORED_STUB, "JMX url prohibit contains stub, please check"); } } From 690eade95d3de56e0bbfff9d012b17220308a235 Mon Sep 17 00:00:00 2001 From: LiuTianyou Date: Wed, 24 Apr 2024 23:19:51 +0800 Subject: [PATCH 2/7] [doc] add help document for mongodb monitoring (#1834) Co-authored-by: liutianyou Co-authored-by: tomsun28 --- home/docs/help/guide.md | 1 + home/docs/help/mongodb.md | 96 +++++++++++++++++++ .../current/help/guide.md | 2 + .../current/help/mongodb.md | 96 +++++++++++++++++++ home/sidebars.json | 4 +- 5 files changed, 197 insertions(+), 2 deletions(-) create mode 100644 home/docs/help/mongodb.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md diff --git a/home/docs/help/guide.md b/home/docs/help/guide.md index 1783e048744..86b21c6feec 100644 --- a/home/docs/help/guide.md +++ b/home/docs/help/guide.md @@ -36,6 +36,7 @@ sidebar_label: Help Center  👉 [OpenGauss database monitoring](opengauss)
 👉 [IoTDB database monitoring](iotdb)
 👉 [TiDB database monitoring](tidb)
+ 👉 [MongoDB database monitoring](mongodb)
### Operating system monitoring diff --git a/home/docs/help/mongodb.md b/home/docs/help/mongodb.md new file mode 100644 index 00000000000..4a2951ec23c --- /dev/null +++ b/home/docs/help/mongodb.md @@ -0,0 +1,96 @@ +--- +id: mongodb +title: Monitoring:MongoDB +sidebar_label: MongoDB database +keywords: [ open source monitoring tool, open source database monitoring tool, monitoring MongoDB database metrics ] +--- + +> Collect and monitor the general performance Metrics of MongoDB database. + +### Configuration parameter + +| Parameter name | Parameter help description | +|------------------------|-------------------------------------------------------------------------------------------------------------------------| +| Target Host | Monitored IPV4, IPV6 or domain name. Note⚠️Without protocol header (eg: https://, http://). | +| Monitoring name | Identify the name of this monitoring. The name needs to be unique. | +| Port | Port provided by the database. The default is 27017. | +| Username | Username for MongoDB,Optional. | +| Password | Password for MongoDB,Optional. | +| database | Database name. | +| authenticationDatabase | Credentials Storage Database. | +| Connect Timeout(ms) | Set connection timeout for MongoDB, unit: ms, default: 6000ms. | +| Collection interval | Interval time of monitor periodic data collection, unit: second, and the minimum interval that can be set is 30 seconds | +| Bind Tags | Used to classify and manage monitoring resources. | +| Description remarks | For more information about identifying and describing this monitoring, users can note information here. | + +### Collection Metric + +#### Metric set:Build Info + +| Metric name | Metric unit | Metric help description | +|------------------|-------------|-----------------------------------------------------------------------------------------| +| version | none | The version number of the MongoDB server. | +| gitVersion | none | The Git version of the MongoDB codebase. | +| sysInfo | none | System information, typically includes details about the operating system and platform. | +| loaderFlags | none | Loader flags used to link MongoDB binaries | +| compilerFlags | none | Compiler flags used when compiling MongoDB. | +| allocator | none | The memory allocator used by MongoDB. | +| javascriptEngine | none | The JavaScript engine used by MongoDB. | + +#### Metric set:Server Document + +| Metric name | Metric unit | Metric help description | +|-------------|-------------|-----------------------------------| +| deleted | none | The number of documents deleted. | +| inserted | none | The number of documents inserted. | +| returned | none | The number of documents returned. | +| updated | none | The number of documents updated. | + +#### Metric set:Server Operation + +| Metric name | Metric unit | Metric help description | +|----------------|-------------|------------------------------------------------------------------| +| scanAndOrder | none | The number of times a query required both scanning and ordering. | +| writeConflicts | none | The number of write conflicts that occurred. | + +#### Metric set: Max Connections + +| Metric name | Metric unit | Metric help description | +|------------------|-------------|--------------------------------------------| +| deletedDocuments | none | Number of deleted documents. | +| passes | none | Total number of passes for TTL operations. 
| + +#### Metric set:System Info + +| Metric name | Metric unit | Metric help description | +|-------------|-------------|------------------------------------------------------| +| currentTime | none | Current system time. | +| hostname | none | Hostname of the server. | +| cpuAddrSize | MB | Size of CPU address in bits. | +| memSizeMB | MB | Total size of system memory in megabytes. | +| memLimitMB | MB | Memory limit for the MongoDB process in megabytes. | +| numCores | none | Total number of CPU cores. | +| cpuArch | none | CPU architecture. | +| numaEnabled | none | Whether NUMA (Non-Uniform Memory Access) is enabled. | + +#### Metric set:OS Info + +| Metric name | Metric unit | Metric help description | +|-------------|-------------|----------------------------------| +| type | none | Type of the operating system. | +| name | none | Name of the operating system. | +| version | none | Version of the operating system. | + +#### Metric set:Extra Info + +| Metric name | Metric unit | Metric help description | +|-----------------|-------------|--------------------------------------------------------| +| versionString | none | String describing the version of the operating system. | +| libcVersion | none | Version of the C standard library (libc). | +| kernelVersion | none | Version of the operating system kernel. | +| cpuFrequencyMHz | none | Frequency of the CPU in megahertz. | +| cpuFeatures | none | Features supported by the CPU. | +| pageSize | none | Size of a memory page in bytes. | +| numPages | none | Total number of memory pages. | +| maxOpenFiles | none | Maximum number of open files allowed. | + diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md index 207936f8e8e..24f6382e02c 100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/guide.md @@ -34,6 +34,7 @@ sidebar_label: 帮助入门  👉 [OpenGauss数据库监控](opengauss)
 👉 [IoTDB数据库监控](iotdb)
 👉 [TiDB数据库监控](tidb)
+  👉 [MongoDB数据库监控](mongodb)
### 操作系统监控 @@ -138,6 +139,7 @@ sidebar_label: 帮助入门  👉 [达梦数据库监控](dm)
 👉 [OpenGauss数据库监控](opengauss)
 👉 [IoTDB数据库监控](iotdb)
+  👉 [MongoDB数据库监控](mongodb)
### 操作系统监控 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md new file mode 100644 index 00000000000..23c4a866809 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/mongodb.md @@ -0,0 +1,96 @@ +--- +id: mongodb +title: 监控:MongoDB数据库 +sidebar_label: MongoDB数据库 +keywords: [ 开源监控系统, 开源数据库监控, MongoDB数据库监控 ] +--- + +> 对MongoDB数据库的通用性能指标进行采集监控。 + +### 配置参数 + +| 参数名称 | 参数帮助描述 | +|--------|------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | 数据库对外提供的端口,默认为27017。 | +| 用户名 | MongoDB用户名,可选。 | +| 密码 | MongoDB密码,可选。 | +| 数据库 | 数据库名称 | +| 认证数据库 | 存储用户凭据的数据库名称。 | +| 连接超时时间 | 设置连接MongoDB未响应数据时的超时时间,单位ms毫秒,默认6000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒。 | +| 绑定标签 | 用于对监控资源进行分类管理。 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息。 | + +### 采集指标 + +#### 指标集合:构建信息 + +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------|------|-------------------------| +| version | 无 | MongoDB版本信息 | +| gitVersion | 无 | 源代码git版本 | +| sysInfo | 无 | 系统信息 | +| loaderFlags | 无 | 加载器标志 | +| compilerFlags | 无 | 编译器标志 | +| allocator | 无 | MongoDB所使用的内存分配器 | +| javascriptEngine | 无 | MongoDB所使用的JavaScript引擎 | + +#### 指标集合:服务器文档 + +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------|------|--------| +| deleted | 无 | 已删除数 | +| inserted | 无 | 已插入数 | +| returned | 无 | 已返回数 | +| updated | 无 | 已更新数 | + +#### 指标集合:服务器操作 + +| 指标名称 | 指标单位 | 指标帮助描述 | +|----------------|------|-------------------| +| scanAndOrder | 无 | 执行查询时需要扫描并进行排序的次数 | +| writeConflicts | 无 | 写冲突的次数 | + +#### 指标集合: 服务器_ttl + +| 指标名称 | 指标单位 | 指标帮助描述 | +|------------------|------|-------------------------------| +| deletedDocuments | 无 | 删除的过期文档数量 | +| passes | 无 | TTL清理过程的总传递次数,每次传递会检查并删除过期的文档 | + +#### 指标集合:系统信息 + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-------------|------|-----------------------| +| currentTime | 无 | 当前时间 | +| hostname | 无 | 主机名 | +| cpuAddrSize | MB | CPU的地址位数 | +| memSizeMB | MB | 内存大小(MB) | +| memLimitMB | MB | MongoDB进程的内存限制(MB) | +| numCores | 无 | CPU 核数 | +| cpuArch | 无 | CPU 架构 | +| numaEnabled | 无 | 是否启用了NUMA(非一致性内存访问)架构 | + +#### 指标集合:操作系统信息 + +| 指标名称 | 指标单位 | 指标帮助描述 | +|---------|------|--------| +| type | 无 | 操作系统类型 | +| name | 无 | 操作系统名称 | +| version | 无 | 版本号 | + +#### 指标集合:额外信息 + +| 指标名称 | 指标单位 | 指标帮助描述 | +|-----------------|------|----------------------| +| versionString | 无 | 版本 | +| libcVersion | 无 | 标准库版本 | +| kernelVersion | 无 | 内核版本 | +| cpuFrequencyMHz | 无 | CPU 频率(兆赫兹) | +| cpuFeatures | 无 | CPU的特性列表,包括支持的指令集和特性 | +| pageSize | 无 | 内存页大小 | +| numPages | 无 | 内存页数量 | +| maxOpenFiles | 无 | 系统中允许打开的最大文件数 | + diff --git a/home/sidebars.json b/home/sidebars.json index 8372a4fb2ca..a5e5cd1310a 100644 --- a/home/sidebars.json +++ b/home/sidebars.json @@ -166,7 +166,8 @@ "help/dm", "help/opengauss", "help/nebulaGraph", - "help/tidb" + "help/tidb", + "help/mongodb" ] }, @@ -270,7 +271,6 @@ "type": "category", "label": "Others", "items": [ - "others/developer", "others/contributing", "others/resource" ] From 381ad50b1db6cb62daa559d972425514f79610d4 Mon Sep 17 00:00:00 2001 From: "P.P" <118056735+Pzz-2021@users.noreply.github.com> Date: Wed, 24 Apr 2024 23:54:31 +0800 Subject: [PATCH 3/7] [collector] fix: inverts the compareTo sort of MetricsCollect run queue (#1837) Co-authored-by: tomsun28 --- .../org/apache/hertzbeat/collector/dispatch/MetricsCollect.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/MetricsCollect.java b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/MetricsCollect.java index 6be9f859e03..a320054995d 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/MetricsCollect.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/dispatch/MetricsCollect.java @@ -406,6 +406,6 @@ private void setNewThreadName(long monitorId, String app, long startTime, Metric @Override public int compareTo(MetricsCollect collect) { - return runPriority - collect.runPriority; + return collect.runPriority - this.runPriority; } } From 2fa3b5aed05ab5591faaea616c2236d37f4e86ce Mon Sep 17 00:00:00 2001 From: Jast <745925668@qq.com> Date: Thu, 25 Apr 2024 14:10:21 +0800 Subject: [PATCH 4/7] [doc]Doc add debian system (#1842) Co-authored-by: zhangshenghang --- home/docs/help/debian.md | 101 +++++++++++++++++ .../current/help/debian.md | 105 ++++++++++++++++++ home/sidebars.json | 1 + 3 files changed, 207 insertions(+) create mode 100644 home/docs/help/debian.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md diff --git a/home/docs/help/debian.md b/home/docs/help/debian.md new file mode 100644 index 00000000000..65940c34a39 --- /dev/null +++ b/home/docs/help/debian.md @@ -0,0 +1,101 @@ +--- +id: debian +title: Monitoring Debian System Monitoring +sidebar_label: Debian +keywords: [Open Source Monitoring System, Operating System Monitoring, Debian Monitoring] +--- +> Collect and monitor general performance metrics of the Debian system. + +## Configuration Parameters + + +| Parameter Name | Metric help description | +| ----------------------- | ----------------------------------------------------------------------------------------------------------------- | +| Target Host | The monitored destination IPV4, IPV6, or domain name. Note: no protocol header (e.g., https://, http://). | +| Task Name | A unique name to identify this monitoring task. | +| Port | SSH port of the Debian system, default: 22 | +| Timeout | Timeout for the connection, in milliseconds, default: 6000 milliseconds. | +| Connection Reuse | Whether to reuse the SSH connection, default: false. False means a new connection will be created for each query. | +| Username | Server username | +| Password | Server password | +| Collector | Configure which collector to use for scheduling this monitoring. | +| Monitoring Period | The interval for periodically collecting data, in seconds, with a minimum interval of 30 seconds. | +| Binding Tags | Used for categorizing and managing monitoring resources. | +| Metric help description | Additional notes and Metric help descriptions for this monitoring, users can add notes here. | +| Key | Key required to connect to the server. 
| + +### Monitoring Metrics + +#### Metric Set: Basic System Information + + +| Metric Name | Metric Unit | Metric help description | +| -------------- | ----------- | ------------------------ | +| Host Name | N/A | Host name | +| System Version | N/A | Operating system version | +| Uptime | N/A | Boot time | + +#### Metric Set: CPU Information + + +| Metric Name | Metric Unit | Metric help description | +| -------------- | ----------- | ----------------------- | +| Info | N/A | Model | +| Cores | N/A | Number of cores | +| Interrupt | N/A | Number of interrupts | +| Load | N/A | Load | +| Context Switch | N/A | Context switches | +| Usage | % | Usage rate | + +#### Metric Set: Memory Information + + +| Metric Name | Metric Unit | Metric help description | +| ------------------- | ----------- | ---------------------------- | +| Total Memory | Mb | Total memory capacity | +| User Program Memory | Mb | Memory used by user programs | +| Free Memory | Mb | Free memory capacity | +| Buff Cache Memory | Mb | Memory used by cache | +| Available Memory | Mb | Available memory | +| Memory Usage | % | Memory usage rate | + +#### Metric Set: Disk Information + + +| Metric Name | Metric Unit | Metric help description | +| ------------- | ----------- | ----------------------------- | +| Disk Num | N/A | Total number of disks | +| Partition Num | N/A | Total number of partitions | +| Block Write | N/A | Number of disk blocks written | +| Block Read | N/A | Number of disk blocks read | +| Write Rate | iops | Disk write rate | + +#### Metric Set: Network Interface Information + +Statistics for all network interface cards, including interface name, incoming data traffic, and outgoing data traffic. +Metric Unit: Mb + +#### Metric Set: File System + +Statistics for all mounted file systems. Statistics include: file system, usage, available space, usage rate, mount point. +Metric Unit: + +- Usage: Mb +- Available Space: Mb +- Usage Rate: % + +#### Metric Set: Top 10 CPU Processes + +Statistics for the top 10 processes by CPU usage. Statistics include: process ID, CPU usage rate, memory usage rate, command being executed. +Metric Unit: + +- CPU Usage Rate: % +- Memory Usage Rate: % + +#### Metric Set: Top 10 Memory Processes + +Statistics for the top 10 processes by memory usage. Statistics include: process ID, memory usage rate, CPU usage rate, command being executed. 
+Metric Unit: + +- Memory Usage Rate: % +- CPU Usage Rate: % diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md new file mode 100644 index 00000000000..dcda89ee2b6 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/debian.md @@ -0,0 +1,105 @@ +--- +id: debian +title: 监控:debian 系统监控 +sidebar_label: Debian +keywords: [开源监控系统, 操作系统监控, Debian监控] +--- +> 对Debian系统的通用性能指标进行采集监控 + +## 配置参数 + + +| 参数名称 | 参数帮助描述 | +| -------- | ------------------------------------------------------------------------- | +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 端口 | Debian系统的ssh端口,默认: 22 | +| 超时时间 | 设置连接的超时时间,单位ms毫秒,默认6000毫秒。 | +| 复用连接 | 设置SSH连接是否复用,默认为:false。为false每次回去信息都会创建一个连接 | +| 用户名 | 服务器用户名 | +| 密码 | 服务器密码 | +| 采集器 | 配置此监控使用哪台采集器调度采集 | +| 监控周期 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 绑定标签 | 用于对监控资源进行分类管理 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | +| 密钥 | 连接服务器所需密钥 | + +### 采集指标 + +#### 指标集合:系统基本信息 + + +| 指标名称 | 指标单位 | 指标帮助描述 | +| -------------- | -------- | ------------ | +| Host Name | 无 | 主机名称 | +| System Version | 无 | 操作系统版本 | +| Uptime | 无 | 启动时间 | + +#### 指标集合:CPU 信息 + + +| 指标名称 | 指标单位 | 指标帮助描述 | +| -------------- | -------- | ------------ | +| Info | 无 | 型号 | +| Cores | 无 | 核数 | +| Interrupt | 无 | 中断数 | +| Load | 无 | 负载 | +| Context Switch | 无 | 上下文切换 | +| Usage | % | 使用率 | + +#### 指标集合:内存信息 + + +| 指标名称 | 指标单位 | 指标帮助描述 | +| ------------------- | -------- | -------------- | +| Total Memory | Mb | 总内存容量 | +| User Program Memory | Mb | 用户程序内存量 | +| Free Memory | Mb | 空闲内存容量 | +| Buff Cache Memory | Mb | 缓存占用内存 | +| Available Memory | Mb | 剩余可用内存 | +| Memory Usage | % | 内存使用率 | + +#### 指标集合:磁盘信息 + + +| 指标名称 | 指标单位 | 指标帮助描述 | +| ------------- | -------- | ------------ | +| Disk Num | 无 | 磁盘总数 | +| Partition Num | 无 | 分区总数 | +| Block Write | 无 | 写磁盘块数 | +| Block Read | 无 | 读磁盘块数 | +| Write Rate | iops | 磁盘写速率 | + +#### 指标集合:网卡信息 + +统计所有网卡的网卡名称、入站数据流量、出站数据流量。 + +单位:Mb + +#### 指标集合:文件系统 + +统计所有挂载的文件系统的使用情况。统计信息包括:文件系统、已使用量、可用量、使用率、挂载点。 + +单位: + +- 已使用量:Mb +- 可用量:Mb +- 使用率:% + +#### 指标集合:Top10 CPU进程 + +统计进程使用CPU的Top10进程。统计信息包括:进程ID、CPU占用率、内存占用率、执行命令。 + +单位: + +- CPU占用率:% +- 内存占用率:% + +#### 指标集合:Top10 内存进程 + +统计进程使用内存的Top10进程。统计信息包括:进程ID、内存占用率、CPU占用率、执行命令。 + +单位: + +- 内存占用率:% +- CPU占用率:% diff --git a/home/sidebars.json b/home/sidebars.json index a5e5cd1310a..ee96933da7d 100644 --- a/home/sidebars.json +++ b/home/sidebars.json @@ -186,6 +186,7 @@ "help/linux", "help/windows", "help/ubuntu", + "help/debian", "help/centos" ] }, From 4a3e27382c2111cbe700afea6ccf93b8855cfe06 Mon Sep 17 00:00:00 2001 From: Jast <745925668@qq.com> Date: Thu, 25 Apr 2024 14:30:39 +0800 Subject: [PATCH 5/7] [feature] add Apache Hbase RegionServer monitoring (#1833) Co-authored-by: zhangshenghang Co-authored-by: zhangshenghang Co-authored-by: tomsun28 --- home/docs/help/hbase_regionserver.md | 96 +++ .../current/help/hbase_master.md | 2 +- .../current/help/hbase_regionserver.md | 97 +++ home/sidebars.json | 2 + .../define/app-hbase_regionserver.yml | 578 ++++++++++++++++++ 5 files changed, 774 insertions(+), 1 deletion(-) create mode 100644 home/docs/help/hbase_regionserver.md create mode 100644 home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md create mode 100644 manager/src/main/resources/define/app-hbase_regionserver.yml diff --git a/home/docs/help/hbase_regionserver.md b/home/docs/help/hbase_regionserver.md new file 
mode 100644 index 00000000000..86d9c7e84bb --- /dev/null +++ b/home/docs/help/hbase_regionserver.md @@ -0,0 +1,96 @@ +--- +id: hbase_regionserver +title: Monitoring HBase RegionServer Monitoring +sidebar_label: HBase RegionServer Monitoring +keywords: [Open-source monitoring system, Open-source database monitoring, RegionServer monitoring] +--- +> Collect and monitor common performance metrics for HBase RegionServer. + +**Protocol:** HTTP + +## Pre-Monitoring Operations + +Review the `hbase-site.xml` file to obtain the value of the `hbase.regionserver.info.port` configuration item, which is used for monitoring. + +## Configuration Parameters + + +| Parameter Name | Parameter Description | +| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| Target Host | The IPV4, IPV6, or domain name of the monitored entity. Note ⚠️ Do not include the protocol header (e.g., https://, http://). | +| Port | The port number of the HBase regionserver, default is 16030, i.e., the value of the`hbase.regionserver.info.port` parameter | +| Task Name | A unique name to identify this monitoring task. | +| Query Timeout | Set the timeout for Kafka connections in milliseconds, default is 3000 ms. | +| Collection Interval | The interval time for periodic data collection in seconds, with a minimum interval of 30 seconds. | +| Probe Before Adding | Whether to probe and check the availability of monitoring before adding new monitoring, only proceed with the addition if the probe is successful. | +| Description Note | Additional notes to identify and describe this monitoring, users can add notes here. | + +### Collection Metrics + +> All metric names are directly referenced from the official fields, hence there may be non-standard naming. 
+ +#### Metric Set: server + + +| Metric Name | Unit | Metric Description | +| --------------------------------- | ----- | ------------------------------------------------------------------------- | +| regionCount | None | Number of Regions | +| readRequestCount | None | Number of read requests since cluster restart | +| writeRequestCount | None | Number of write requests since cluster restart | +| averageRegionSize | MB | Average size of a Region | +| totalRequestCount | None | Total number of requests | +| ScanTime_num_ops | None | Total number of Scan requests | +| Append_num_ops | None | Total number of Append requests | +| Increment_num_ops | None | Total number of Increment requests | +| Get_num_ops | None | Total number of Get requests | +| Delete_num_ops | None | Total number of Delete requests | +| Put_num_ops | None | Total number of Put requests | +| ScanTime_mean | None | Average time of a Scan request | +| ScanTime_min | None | Minimum time of a Scan request | +| ScanTime_max | None | Maximum time of a Scan request | +| ScanSize_mean | bytes | Average size of a Scan request | +| ScanSize_min | None | Minimum size of a Scan request | +| ScanSize_max | None | Maximum size of a Scan request | +| slowPutCount | None | Number of slow Put operations | +| slowGetCount | None | Number of slow Get operations | +| slowAppendCount | None | Number of slow Append operations | +| slowIncrementCount | None | Number of slow Increment operations | +| slowDeleteCount | None | Number of slow Delete operations | +| blockCacheSize | None | Size of memory used by block cache | +| blockCacheCount | None | Number of blocks in Block Cache | +| blockCacheExpressHitPercent | None | Block cache hit ratio | +| memStoreSize | None | Size of Memstore | +| FlushTime_num_ops | None | Number of RS writes to disk/Memstore flushes | +| flushQueueLength | None | Length of Region Flush queue | +| flushedCellsSize | None | Size flushed to disk | +| storeFileCount | None | Number of Storefiles | +| storeCount | None | Number of Stores | +| storeFileSize | None | Size of Storefiles | +| compactionQueueLength | None | Length of Compaction queue | +| percentFilesLocal | None | Percentage of HFile in local HDFS Data Node | +| percentFilesLocalSecondaryRegions | None | Percentage of HFile for secondary region replicas in local HDFS Data Node | +| hlogFileCount | None | Number of WAL files | +| hlogFileSize | None | Size of WAL files | + +#### Metric Set: IPC + + +| Metric Name | Unit | Metric Description | +| ------------------------- | ---- | -------------------------------------- | +| numActiveHandler | None | Current number of RITs | +| NotServingRegionException | None | Number of RITs exceeding the threshold | +| RegionMovedException | ms | Duration of the oldest RIT | +| RegionTooBusyException | ms | Duration of the oldest RIT | + +#### Metric Set: JVM + + +| Metric Name | Unit | Metric Description | +| -------------------- | ---- | --------------------------------- | +| MemNonHeapUsedM | None | Current active RegionServer list | +| MemNonHeapCommittedM | None | Current offline RegionServer list | +| MemHeapUsedM | None | Zookeeper list | +| MemHeapCommittedM | None | Master node | +| MemHeapMaxM | None | Cluster balance load times | +| MemMaxM | None | RPC handle count | +| GcCount | MB | Cluster data reception volume | diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md index 79d5a7f9b0a..6d490ae8095 
100644 --- a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_master.md @@ -1,7 +1,7 @@ --- id: hbase_master title: 监控:Hbase Master监控 -sidebar_label: HbaseMaster监控 +sidebar_label: Apache Hbase Master keywords: [开源监控系统, 开源数据库监控, HbaseMaster监控] --- > 对Hbase Master的通用性能指标进行采集监控 diff --git a/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md new file mode 100644 index 00000000000..d0a1d129007 --- /dev/null +++ b/home/i18n/zh-cn/docusaurus-plugin-content-docs/current/help/hbase_regionserver.md @@ -0,0 +1,97 @@ +--- +id: hbase_regionserver +title: 监控 Hbase RegionServer监控 +sidebar_label: Apache Hbase RegionServer +keywords: [开源监控系统, 开源数据库监控, RegionServer监控] +--- +> 对Hbase RegionServer的通用性能指标进行采集监控 + +**使用协议:HTTP** + +## 监控前操作 + +查看 `hbase-site.xml` 文件,获取 `hbase.regionserver.info.port` 配置项的值,该值用作监控使用。 + +## 配置参数 + + +| 参数名称 | 参数帮助描述 | +| ------------ |---------------------------------------------------------------------| +| 目标Host | 被监控的对端IPV4,IPV6或域名。注意⚠️不带协议头(eg: https://, http://)。 | +| 端口 | hbase regionserver的端口号,默认为16030。即:`hbase.regionserver.info.port`参数值 | +| 任务名称 | 标识此监控的名称,名称需要保证唯一性。 | +| 查询超时时间 | 设置Kafka连接的超时时间,单位ms毫秒,默认3000毫秒。 | +| 采集间隔 | 监控周期性采集数据间隔时间,单位秒,可设置的最小间隔为30秒 | +| 是否探测 | 新增监控前是否先探测检查监控可用性,探测成功才会继续新增修改操作 | +| 描述备注 | 更多标识和描述此监控的备注信息,用户可以在这里备注信息 | + +### 采集指标 + +> 所有指标名称均直接引用官方的字段,所以存在命名不规范。 + +#### 指标集合:server + + +| 指标名称 | 指标单位 | 指标帮助描述 | +| -------------------- |-------|------------------------------------------| +| regionCount | 无 | Region数量 | +| readRequestCount | 无 | 重启集群后的读请求数量 | +| writeRequestCount | 无 | 重启集群后的写请求数量 | +| averageRegionSize | MB | 平均Region大小 | +| totalRequestCount | 无 | 全部请求数量 | +| ScanTime_num_ops | 无 | Scan 请求总量 | +| Append_num_ops | 无 | Append 请求量 | +| Increment_num_ops | 无 | Increment请求量 | +| Get_num_ops | 无 | Get 请求量 | +| Delete_num_ops | 无 | Delete 请求量 | +| Put_num_ops | 无 | Put 请求量 | +| ScanTime_mean | 无 | 平均 Scan 请求时间 | +| ScanTime_min | 无 | 最小 Scan 请求时间 | +| ScanTime_max | 无 | 最大 Scan 请求时间 | +| ScanSize_mean | bytes | 平均 Scan 请求大小 | +| ScanSize_min | 无 | 最小 Scan 请求大小 | +| ScanSize_max | 无 | 最大 Scan 请求大小 | +| slowPutCount | 无 | 慢操作次数/Put | +| slowGetCount | 无 | 慢操作次数/Get | +| slowAppendCount | 无 | 慢操作次数/Append | +| slowIncrementCount | 无 | 慢操作次数/Increment | +| slowDeleteCount | 无 | 慢操作次数/Delete | +| blockCacheSize | 无 | 缓存块内存占用大小 | +| blockCacheCount | 无 | 缓存块数量_Block Cache 中的 Block 数量 | +| blockCacheExpressHitPercent | 无 | 读缓存命中率 | +| memStoreSize | 无 | Memstore 大小 | +| FlushTime_num_ops | 无 | RS写磁盘次数/Memstore flush 写磁盘次数 | +| flushQueueLength | 无 | Region Flush 队列长度 | +| flushedCellsSize | 无 | flush到磁盘大小 | +| storeFileCount | 无 | Storefile 个数 | +| storeCount | 无 | Store 个数 | +| storeFileSize | 无 | Storefile 大小 | +| compactionQueueLength | 无 | Compaction 队列长度 | +| percentFilesLocal | 无 | Region 的 HFile 位于本地 HDFS Data Node的比例 | +| percentFilesLocalSecondaryRegions | 无 | Region 副本的 HFile 位于本地 HDFS Data Node的比例 | +| hlogFileCount | 无 | WAL 文件数量 | +| hlogFileSize | 无 | WAL 文件大小 | + +#### 指标集合:IPC + + +| 指标名称 | 指标单位 | 指标帮助描述 | +| --------------------- | ------ | ------------------- | +| numActiveHandler | 无 | 当前的 RIT 数量 | +| NotServingRegionException | 无 | 超过阈值的 RIT 数量 | +| RegionMovedException | ms | 最老的RIT的持续时间 | +| RegionTooBusyException | ms | 最老的RIT的持续时间 | + +#### 指标集合:JVM + + +| 指标名称 | 指标单位 | 指标帮助描述 | +| ----------------------- | ----- 
| ------------------------ | +| MemNonHeapUsedM | 无 | 当前活跃RegionServer列表 | +| MemNonHeapCommittedM | 无 | 当前离线RegionServer列表 | +| MemHeapUsedM | 无 | Zookeeper列表 | +| MemHeapCommittedM | 无 | Master节点 | +| MemHeapMaxM | 无 | 集群负载均衡次数 | +| MemMaxM | 无 | RPC句柄数 | +| GcCount | MB | 集群接收数据量 | + diff --git a/home/sidebars.json b/home/sidebars.json index ee96933da7d..bc2aff9a69c 100644 --- a/home/sidebars.json +++ b/home/sidebars.json @@ -210,6 +210,8 @@ "help/doris_be", "help/doris_fe", "help/hadoop", + "help/hbase_master", + "help/hbase_regionserver", "help/iotdb", "help/hive", "help/airflow", diff --git a/manager/src/main/resources/define/app-hbase_regionserver.yml b/manager/src/main/resources/define/app-hbase_regionserver.yml new file mode 100644 index 00000000000..9a1eb770c11 --- /dev/null +++ b/manager/src/main/resources/define/app-hbase_regionserver.yml @@ -0,0 +1,578 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The monitoring type category:service-application service monitoring db-database monitoring custom-custom monitoring os-operating system monitoring +category: bigdata +# The monitoring type eg: linux windows tomcat mysql aws... +app: hbase_regionserver +# The monitoring i18n name +name: + zh-CN: Apache Hbase RegionServer + en-US: Apache Hbase RegionServer +# The description and help of this monitoring type +help: + zh-CN: Hertzbeat 对 Hbase 数据库 RegionServer 节点监控指标进行监控。
您可以点击 “新建 Apache Hbase RegionServer” 并进行配置,或者选择“更多操作”,导入已有配置。 + en-US: Hertzbeat monitors the RegionServer node monitoring indicators of the Hbase database.
You can click "New Apache Hbase RegionServer" to configure, or select "More Actions" to import an existing configuration. + zh-TW: Hertzbeat 對 Hbase 數據庫 RegionServer 节點監控指標進行監控。
您可以點擊 “新建 Apache Hbase RegionServer” 並進行配置,或者選擇“更多操作”,導入已有配置。 + +helpLink: + zh-CN: https://hertzbeat.apache.org/zh-cn/docs/help/hbase_regionserver/ + en-US: https://hertzbeat.apache.org/docs/help/hbase_regionserver/ +# Input params define for monitoring(render web ui by the definition) +params: + # field-param field key + - field: host + # name-param field display i18n name + name: + zh-CN: 目标Host + en-US: Target Host + # type-param field type(most mapping the html input type) + type: host + # required-true or false + required: true + # field-param field key + - field: port + # name-param field display i18n name + name: + zh-CN: 端口 + en-US: Port + # type-param field type(most mapping the html input type) + type: number + # when type is number, range is required + range: '[0,65535]' + # required-true or false + required: true + # default value + defaultValue: 16030 + # field-param field key + - field: timeout + # name-param field display i18n name + name: + zh-CN: 查询超时时间 + en-US: Query Timeout + # type-param field type(most mapping the html input type) + type: number + # required-true or false + required: false + # hide param-true or false + hide: true + # default value + defaultValue: 6000 +# collect metrics config list +metrics: + # metrics - Server + - name: Server + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: regionCount + type: 0 + label: true + i18n: + zh-CN: Region数量 + en-US: regionCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: readRequestCount + type: 0 + label: true + i18n: + zh-CN: 重启集群后的读请求数量 + en-US: readRequestCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: writeRequestCount + type: 0 + label: true + i18n: + zh-CN: 重启集群后的写请求数量 + en-US: writeRequestCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: averageRegionSize + type: 0 + label: true + i18n: + zh-CN: 平均Region大小 + en-US: averageRegionSize + unit: 'MB' + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: totalRequestCount + type: 0 + label: true + i18n: + zh-CN: 全部请求数量 + en-US: totalRequestCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ScanTime_num_ops + type: 0 + label: true + i18n: + zh-CN: Scan 请求总量 + en-US: ScanTime_num_ops + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: Append_num_ops + type: 0 + label: true + i18n: + zh-CN: Append 请求量 + en-US: Append_num_ops + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: Increment_num_ops + type: 0 + label: true + i18n: + zh-CN: Increment请求量 + en-US: Increment_num_ops + # field-metric 
name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: Get_num_ops + type: 0 + label: true + i18n: + zh-CN: Get 请求量 + en-US: Get_num_ops + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: Delete_num_ops + type: 0 + label: true + i18n: + zh-CN: Delete 请求量 + en-US: Delete_num_ops + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: Put_num_ops + type: 0 + label: true + i18n: + zh-CN: Put 请求量 + en-US: Put_num_ops + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ScanTime_mean + type: 0 + label: true + i18n: + zh-CN: 平均 Scan 请求时间 + en-US: ScanTime_mean + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ScanTime_min + type: 0 + label: true + i18n: + zh-CN: 最小 Scan 请求时间 + en-US: ScanTime_min + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ScanTime_max + type: 0 + label: true + i18n: + zh-CN: 最大 Scan 请求时间 + en-US: ScanTime_max + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ScanSize_mean + type: 0 + label: true + i18n: + zh-CN: 平均 Scan 请求大小 + en-US: ScanSize_mean + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ScanSize_min + type: 0 + label: true + i18n: + zh-CN: 最小 Scan 请求大小 + en-US: ScanSize_min + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: ScanSize_max + type: 0 + label: true + i18n: + zh-CN: 最大 Scan 请求大小 + en-US: ScanSize_max + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: slowPutCount + type: 0 + label: true + i18n: + zh-CN: 慢操作次数/Put + en-US: slowPutCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: slowGetCount + type: 0 + label: true + i18n: + zh-CN: 慢操作次数/Get + en-US: slowGetCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: slowAppendCount + type: 0 + label: true + i18n: + zh-CN: 慢操作次数/Append + en-US: slowAppendCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: slowIncrementCount + type: 0 + label: true + i18n: + zh-CN: 慢操作次数/Increment + en-US: slowIncrementCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: slowDeleteCount + type: 0 + label: true + i18n: + zh-CN: 慢操作次数/Delete + en-US: slowDeleteCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: blockCacheSize + type: 0 + label: true + unit: 'MB' + i18n: + zh-CN: 缓存块内存占用大小 + en-US: blockCacheSize + # field-metric 
name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: blockCacheCount + type: 0 + label: true + i18n: + zh-CN: 缓存块数量_Block Cache 中的 Block 数量 + en-US: blockCacheCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: blockCacheExpressHitPercent + type: 0 + label: true + i18n: + zh-CN: 读缓存命中率 + en-US: blockCacheExpressHitPercent + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: memStoreSize + type: 0 + label: true + i18n: + zh-CN: Memstore 大小 + en-US: memStoreSize + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: FlushTime_num_ops + type: 0 + label: true + i18n: + zh-CN: RS写磁盘次数/MemStore Flush 写磁盘次数 + en-US: FlushTime_num_ops + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: flushQueueLength + type: 0 + label: true + i18n: + zh-CN: Region Flush 队列长度 + en-US: flushQueueLength + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: flushedCellsSize + type: 0 + label: true + unit: 'MB' + i18n: + zh-CN: flush到磁盘大小 + en-US: flushedCellsSize + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: storeCount + type: 0 + label: true + i18n: + zh-CN: Store 个数 + en-US: storeCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: storeFileCount + type: 0 + label: true + i18n: + zh-CN: Storefile 个数 + en-US: storeFileCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: storeFileSize + type: 0 + label: true + unit: 'MB' + i18n: + zh-CN: Storefile 大小 + en-US: storeFileSize + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: compactionQueueLength + type: 0 + label: true + i18n: + zh-CN: Compaction 队列长度 + en-US: compactionQueueLength + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: percentFilesLocal + type: 0 + label: true + i18n: + zh-CN: Region 的 HFile 位于本地 HDFS data node的比例 + en-US: percentFilesLocal + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: percentFilesLocalSecondaryRegions + type: 0 + label: true + i18n: + zh-CN: Region 副本的 HFile 位于本地 HDFS data node的比例 + en-US: percentFilesLocalSecondaryRegions + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: hlogFileCount + type: 0 + label: true + i18n: + zh-CN: WAL 文件数量 + en-US: hlogFileCount + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: hlogFileSize + type: 0 + label: true + i18n: + zh-CN: WAL 文件大小 + en-US: hlogFileSize + # (optional)metrics field alias name, it is 
used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.regionCount + - $.readRequestCount + - $.writeRequestCount + - $.averageRegionSize + - $.totalRequestCount + - $.ScanTime_num_ops + - $.Append_num_ops + - $.Increment_num_ops + - $.Get_num_ops + - $.Delete_num_ops + - $.Put_num_ops + - $.ScanTime_mean + - $.ScanTime_min + - $.ScanTime_max + - $.ScanSize_mean + - $.ScanSize_min + - $.ScanSize_max + - $.slowPutCount + - $.slowGetCount + - $.slowAppendCount + - $.slowIncrementCount + - $.slowDeleteCount + - $.blockCacheSize + - $.blockCacheCount + - $.blockCacheExpressHitPercent + - $.memStoreSize + - $.FlushTime_num_ops + - $.flushQueueLength + - $.flushedCellsSize + - $.storeCount + - $.storeFileCount + - $.storeFileSize + - $.compactionQueueLength + - $.percentFilesLocal + - $.percentFilesLocalSecondaryRegions + - $.hlogFileCount + - $.hlogFileSize + calculates: + - regionCount=$.regionCount + - readRequestCount=$.readRequestCount + - writeRequestCount=$.writeRequestCount + - averageRegionSize=$.averageRegionSize + - totalRequestCount=$.totalRequestCount + - ScanTime_num_ops=$.ScanTime_num_ops + - Append_num_ops=$.Append_num_ops + - Increment_num_ops=$.Increment_num_ops + - Get_num_ops=$.Get_num_ops + - Delete_num_ops=$.Delete_num_ops + - Put_num_ops=$.Put_num_ops + - ScanTime_mean=$.ScanTime_mean + - ScanTime_min=$.ScanTime_min + - ScanTime_max=$.ScanTime_max + - ScanSize_mean=$.ScanSize_mean + - ScanSize_min=$.ScanSize_min + - ScanSize_max=$.ScanSize_max + - slowPutCount=$.slowPutCount + - slowGetCount=$.slowGetCount + - slowAppendCount=$.slowAppendCount + - slowIncrementCount=$.slowIncrementCount + - slowDeleteCount=$.slowDeleteCount + - blockCacheSize=$.blockCacheSize + - blockCacheCount=$.blockCacheCount + - blockCacheExpressHitPercent=$.blockCacheExpressHitPercent + - memStoreSize=$.memStoreSize + - FlushTime_num_ops=$.FlushTime_num_ops + - flushQueueLength=$.flushQueueLength + - flushedCellsSize=$.flushedCellsSize + - storeCount=$.storeCount + - storeFileCount=$.storeFileCount + - storeFileSize=$.storeFileSize + - compactionQueueLength=$.compactionQueueLength + - percentFilesLocal=$.percentFilesLocal + - percentFilesLocalSecondaryRegions=$.percentFilesLocalSecondaryRegions + - hlogFileCount=$.hlogFileCount + - hlogFileSize=$.hlogFileSize + units: + - averageRegionSize=B->MB + - blockCacheSize=B->MB + - storeFileSize=B->MB + - flushedCellsSize=B->MB + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$.beans[?(@.name == "Hadoop:service=HBase,name=RegionServer,sub=Server")]' + # metrics - IPC + - name: IPC + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: numActiveHandler + type: 0 + label: true + i18n: + zh-CN: RPC句柄数 + en-US: numActiveHandler + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: NotServingRegionException + type: 0 + label: true + i18n: + zh-CN: NotServingRegionException 异常数量 + en-US: NotServingRegionException + # field-metric name, 
type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: RegionMovedException + type: 0 + label: true + i18n: + zh-CN: RegionMovedException异常数量 + en-US: RegionMovedException + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: RegionTooBusyException + type: 0 + label: true + i18n: + zh-CN: RegionTooBusyException异常数量 + en-US: RegionTooBusyException + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.numActiveHandler + - $.['exceptions.NotServingRegionException'] + - $.['exceptions.RegionMovedException'] + - $.['exceptions.RegionTooBusyException'] + calculates: + - numActiveHandler=$.numActiveHandler + - NotServingRegionException=#`$.['exceptions.NotServingRegionException']` + - RegionMovedException=#`$.['exceptions.RegionMovedException']` + - RegionTooBusyException=#`$.['exceptions.RegionTooBusyException']` + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$.beans[?(@.name == "Hadoop:service=HBase,name=RegionServer,sub=IPC")]' + # metrics - JVM + - name: JVM + # metrics scheduling priority(0->127)->(high->low), metrics with the same priority will be scheduled in parallel + # priority 0's metrics is availability metrics, it will be scheduled first, only availability metrics collect success will the scheduling continue + priority: 0 + # collect metrics content + fields: + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemNonHeapUsedM + type: 0 + label: true + unit: 'MB' + i18n: + zh-CN: 进程使用的非堆内存大小 + en-US: MemNonHeapUsedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemNonHeapCommittedM + type: 0 + label: true + unit: 'MB' + i18n: + zh-CN: 进程 commit 的非堆内存大小 + en-US: MemNonHeapCommittedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemHeapUsedM + type: 0 + label: true + unit: 'MB' + i18n: + zh-CN: 进程使用的堆内存大小 + en-US: MemHeapUsedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemHeapCommittedM + type: 0 + label: true + unit: 'MB' + i18n: + zh-CN: 进程 commit 的堆内存大小 + en-US: MemHeapCommittedM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemHeapMaxM + type: 0 + label: true + unit: 'MB' + i18n: + zh-CN: 进程最大的堆内存大小 + en-US: MemHeapMaxM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: MemMaxM + type: 0 + label: true + unit: 'MB' + i18n: + zh-CN: 进程最大内存大小 + en-US: MemMaxM + # field-metric name, type-metric type(0-number,1-string), unit-metric unit('%','ms','MB'), label-whether it is a metrics label field + - field: GcCount + type: 0 + label: true + i18n: + zh-CN: Young GC次数 + en-US: GcCount + # (optional)metrics field alias name, it is used as an alias field to map and convert the collected data and metrics field + aliasFields: + - $.MemNonHeapUsedM + - $.MemNonHeapCommittedM 
+ - $.MemHeapUsedM + - $.MemHeapCommittedM + - $.MemHeapMaxM + - $.MemMaxM + - $.GcCount + calculates: + - MemNonHeapUsedM=$.MemNonHeapUsedM + - MemNonHeapCommittedM=$.MemNonHeapCommittedM + - MemHeapUsedM=$.MemHeapUsedM + - MemHeapCommittedM=$.MemHeapCommittedM + - MemHeapMaxM=$.MemHeapMaxM + - MemMaxM=$.MemMaxM + - GcCount=$.GcCount + protocol: http + http: + host: ^_^host^_^ + port: ^_^port^_^ + url: /jmx + method: GET + ssl: ^_^ssl^_^ + parseType: jsonPath + parseScript: '$.beans[?(@.name == "Hadoop:service=HBase,name=JvmMetrics")]' \ No newline at end of file From c0c9ff07926de1cc52352d801e7a40b86d676952 Mon Sep 17 00:00:00 2001 From: LiuTianyou Date: Thu, 25 Apr 2024 14:47:36 +0800 Subject: [PATCH 6/7] [improve] optimize websocket monitor (#1838) Co-authored-by: tomsun28 --- .../websocket/WebsocketCollectImpl.java | 41 ++++++++++++------- .../job/protocol/WebsocketProtocol.java | 5 +++ .../main/resources/define/app-websocket.yml | 16 ++++++++ 3 files changed, 47 insertions(+), 15 deletions(-) diff --git a/collector/src/main/java/org/apache/hertzbeat/collector/collect/websocket/WebsocketCollectImpl.java b/collector/src/main/java/org/apache/hertzbeat/collector/collect/websocket/WebsocketCollectImpl.java index 5bdbd35ade4..1f59f761a5b 100644 --- a/collector/src/main/java/org/apache/hertzbeat/collector/collect/websocket/WebsocketCollectImpl.java +++ b/collector/src/main/java/org/apache/hertzbeat/collector/collect/websocket/WebsocketCollectImpl.java @@ -44,6 +44,7 @@ import org.apache.hertzbeat.common.entity.job.protocol.WebsocketProtocol; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.util.CommonUtil; +import org.springframework.util.Assert; /** * Websocket Collect @@ -62,6 +63,11 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri return; } WebsocketProtocol websocketProtocol = metrics.getWebsocket(); + // Compatible with monitoring templates without path parameters + if (StringUtils.isBlank(websocketProtocol.getPath())) { + websocketProtocol.setPath("/"); + } + checkParam(websocketProtocol); String host = websocketProtocol.getHost(); String port = websocketProtocol.getPort(); Socket socket = null; @@ -74,13 +80,12 @@ public void collect(CollectRep.MetricsData.Builder builder, long monitorId, Stri long responseTime = System.currentTimeMillis() - startTime; OutputStream out = socket.getOutputStream(); InputStream in = socket.getInputStream(); - - - send(out); + + send(out, websocketProtocol); Map resultMap = readHeaders(in); resultMap.put(CollectorConstants.RESPONSE_TIME, Long.toString(responseTime)); - // 关闭输出流和Socket连接 + // Close the output stream and socket connection in.close(); out.close(); socket.close(); @@ -118,10 +123,10 @@ public String supportProtocol() { return DispatchConstants.PROTOCOL_WEBSOCKET; } - private static void send(OutputStream out) throws IOException { + private static void send(OutputStream out, WebsocketProtocol websocketProtocol) throws IOException { byte[] key = generateRandomKey(); String base64Key = base64Encode(key); - String requestLine = "GET / HTTP/1.1\r\n"; + String requestLine = "GET " + websocketProtocol.getPath() + " HTTP/1.1\r\n"; out.write(requestLine.getBytes()); String hostName = InetAddress.getLocalHost().getHostAddress(); out.write(("Host:" + hostName + "\r\n").getBytes()); @@ -135,7 +140,7 @@ private static void send(OutputStream out) throws IOException { out.flush(); } - // 读取响应头 + // Read response headers private static Map readHeaders(InputStream in) throws 
IOException { Map map = new HashMap<>(8); @@ -147,19 +152,19 @@ private static Map readHeaders(InputStream in) throws IOExceptio if (separatorIndex != -1) { String key = line.substring(0, separatorIndex).trim(); String value = line.substring(separatorIndex + 1).trim(); - // 首字母小写化 + // Lowercase first letter map.put(StringUtils.uncapitalize(key), value); } else { - // 切割HTTP/1.1, 101, Switching Protocols + // Cut HTTP/1.1, 101, Switching Protocols String[] parts = line.split("\\s+", 3); if (parts.length == 3) { - for (int i = 0; i < parts.length; i++) { - if (parts[i].startsWith("HTTP")) { - map.put("httpVersion", parts[i]); - } else if (Character.isDigit(parts[i].charAt(0))) { - map.put("responseCode", parts[i]); + for (String part : parts) { + if (part.startsWith("HTTP")) { + map.put("httpVersion", part); + } else if (StringUtils.isNotBlank(part) && Character.isDigit(part.charAt(0))) { + map.put("responseCode", part); } else { - map.put("statusMessage", parts[i]); + map.put("statusMessage", part); } } } @@ -175,6 +180,12 @@ private static byte[] generateRandomKey() { return key; } + private void checkParam(WebsocketProtocol protocol) { + Assert.hasText(protocol.getHost(), "Websocket Protocol host is required."); + Assert.hasText(protocol.getPort(), "Websocket Protocol port is required."); + Assert.hasText(protocol.getPath(), "Websocket Protocol path is required."); + } + private static String base64Encode(byte[] data) { return Base64.getEncoder().encodeToString(data); } diff --git a/common/src/main/java/org/apache/hertzbeat/common/entity/job/protocol/WebsocketProtocol.java b/common/src/main/java/org/apache/hertzbeat/common/entity/job/protocol/WebsocketProtocol.java index 9d46b00028c..2118b13caaa 100644 --- a/common/src/main/java/org/apache/hertzbeat/common/entity/job/protocol/WebsocketProtocol.java +++ b/common/src/main/java/org/apache/hertzbeat/common/entity/job/protocol/WebsocketProtocol.java @@ -39,4 +39,9 @@ public class WebsocketProtocol { * Port number */ private String port; + + /** + * The path to the websocket endpoint + */ + private String path; } diff --git a/manager/src/main/resources/define/app-websocket.yml b/manager/src/main/resources/define/app-websocket.yml index 4a3e3b9f830..d406c2f4e1b 100644 --- a/manager/src/main/resources/define/app-websocket.yml +++ b/manager/src/main/resources/define/app-websocket.yml @@ -63,6 +63,21 @@ params: # required-true or false # 是否是必输项 true-必填 false-可选 required: true + # field-param field key + # field-字段名称标识符 + - field: path + # name-param field display i18n name + # name-参数字段显示名称 + name: + zh-CN: WebSocket服务的路径 + en-US: Path of WebSocket service + # type-param field type(most mapping the html input type) + # type-字段类型,样式(大部分映射input标签type属性) + type: text + # required-true or false + # 是否是必输项 true-必填 false-可选 + required: true + defaultValue: / # collect metrics config list # 采集指标配置列表 metrics: @@ -124,3 +139,4 @@ metrics: # 远程登录主机 host: ^_^host^_^ port: ^_^port^_^ + path: ^_^path^_^ From 41de1b9b5d440861e45e0f9f0b5ada2f9d980678 Mon Sep 17 00:00:00 2001 From: xuziyang <767637918@qq.com> Date: Thu, 25 Apr 2024 14:54:38 +0800 Subject: [PATCH 7/7] [refactor] split the WarehouseProperties class (#1830) Co-authored-by: tomsun28 --- .../apache/hertzbeat/manager/ManagerTest.java | 2 - .../config/WarehouseAutoConfiguration.java | 2 - .../warehouse/config/WarehouseProperties.java | 176 ------------------ .../config/entrance/KafkaProperties.java | 33 ++++ .../config/store/StoreProperties.java | 44 +++++ .../store/greptime/GreptimeProperties.java | 31 
+++ .../store/influxdb/InfluxdbProperties.java | 33 ++++ .../config/store/iotdb/IotDbProperties.java | 48 +++++ .../{ => store/iotdb}/IotDbVersion.java | 2 +- .../config/store/jpa/JpaProperties.java | 33 ++++ .../config/store/memory/MemoryProperties.java | 31 +++ .../config/store/redis/RedisProperties.java | 32 ++++ .../store/tdengine/TdEngineProperties.java | 38 ++++ .../store/vm/VictoriaMetricsProperties.java | 31 +++ .../store/HistoryGrepTimeDbDataStorage.java | 8 +- .../store/HistoryInfluxdbDataStorage.java | 11 +- .../store/HistoryIotDbDataStorage.java | 10 +- .../store/HistoryJpaDatabaseDataStorage.java | 8 +- .../store/HistoryTdEngineDataStorage.java | 12 +- .../HistoryVictoriaMetricsDataStorage.java | 10 +- .../store/RealTimeMemoryDataStorage.java | 10 +- .../store/RealTimeRedisDataStorage.java | 18 +- 22 files changed, 398 insertions(+), 225 deletions(-) delete mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseProperties.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/entrance/KafkaProperties.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/StoreProperties.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/greptime/GreptimeProperties.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/influxdb/InfluxdbProperties.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/iotdb/IotDbProperties.java rename warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/{ => store/iotdb}/IotDbVersion.java (94%) create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/jpa/JpaProperties.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/memory/MemoryProperties.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/redis/RedisProperties.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/tdengine/TdEngineProperties.java create mode 100644 warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/vm/VictoriaMetricsProperties.java diff --git a/manager/src/test/java/org/apache/hertzbeat/manager/ManagerTest.java b/manager/src/test/java/org/apache/hertzbeat/manager/ManagerTest.java index f50761afef4..47333b3c44d 100644 --- a/manager/src/test/java/org/apache/hertzbeat/manager/ManagerTest.java +++ b/manager/src/test/java/org/apache/hertzbeat/manager/ManagerTest.java @@ -42,7 +42,6 @@ import org.apache.hertzbeat.common.service.TencentSmsClient; import org.apache.hertzbeat.common.support.SpringContextHolder; import org.apache.hertzbeat.warehouse.WarehouseWorkerPool; -import org.apache.hertzbeat.warehouse.config.WarehouseProperties; import org.apache.hertzbeat.warehouse.controller.MetricsDataController; import org.apache.hertzbeat.warehouse.store.HistoryIotDbDataStorage; import org.apache.hertzbeat.warehouse.store.HistoryTdEngineDataStorage; @@ -102,7 +101,6 @@ void testAutoImport() { assertNotNull(ctx.getBean(SpringContextHolder.class)); // test warehouse module - assertNotNull(ctx.getBean(WarehouseProperties.class)); assertNotNull(ctx.getBean(WarehouseWorkerPool.class)); // default DataStorage is RealTimeMemoryDataStorage diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseAutoConfiguration.java 
b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseAutoConfiguration.java index 184ec9ef7aa..23e3feb222e 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseAutoConfiguration.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseAutoConfiguration.java @@ -17,7 +17,6 @@ package org.apache.hertzbeat.warehouse.config; -import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.context.annotation.ComponentScan; /** @@ -25,6 +24,5 @@ * @version 2.1 */ @ComponentScan(basePackages = "org.apache.hertzbeat.warehouse") -@EnableConfigurationProperties(WarehouseProperties.class) public class WarehouseAutoConfiguration { } diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseProperties.java deleted file mode 100644 index 3dc69dfd84c..00000000000 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/WarehouseProperties.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hertzbeat.warehouse.config; - -import java.time.ZoneId; -import java.util.List; -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.boot.context.properties.bind.DefaultValue; - -/** - * Data warehouse configuration properties - * @param entrance Data entry configuration properties - * @param store Datastore configuration properties - */ -@ConfigurationProperties(prefix = "warehouse") -public record WarehouseProperties ( - EntranceProperties entrance, - StoreProperties store -) { - - /** - * Data entry configuration properties - * The entrance can be to obtain data from message middleware such as kafka rabbitmq rocketmq - */ - public record EntranceProperties( - KafkaProperties kafka - ){ - /** - * kafka configuration information - */ - public record KafkaProperties( - @DefaultValue("true") boolean enabled, - @DefaultValue("127.0.0.1:9092") String servers, - String topic, - String groupId - ){} - } - - /** - * Scheduling data export configuration properties - * @param jpa use mysql/h2 jpa store metrics history data - * @param memory Memory storage configuration information - * @param influxdb influxdb configuration information - * @param redis redis configuration information - * @param victoriaMetrics VictoriaMetrics Properties - * @param tdEngine TdEngine configuration information - * @param iotDb IoTDB configuration information - * @param greptime GrepTimeDB Config - */ - public record StoreProperties( - JpaProperties jpa, - MemoryProperties memory, - InfluxdbProperties influxdb, - RedisProperties redis, - VictoriaMetricsProperties victoriaMetrics, - TdEngineProperties tdEngine, - IotDbProperties iotDb, - GreptimeProperties greptime - ){ - /** - * Memory storage configuration information - * @param enabled Whether memory data storage is enabled - * @param initSize Memory storage map initialization size - */ - public record MemoryProperties( - @DefaultValue("true") boolean enabled, - @DefaultValue("1024") Integer initSize - ){} - - /** - * JPA configuration information - * @param enabled use mysql/h2 jpa store metrics history data - * @param expireTime save data expire time(ms) - * @param maxHistoryRecordNum The maximum number of history records retained - */ - public record JpaProperties( - @DefaultValue("true") boolean enabled, - @DefaultValue("1h") String expireTime, - @DefaultValue("20000") Integer maxHistoryRecordNum - ) {} - - /** - * Influxdb configuration information - */ - public record InfluxdbProperties( - @DefaultValue("false") boolean enabled, - String serverUrl, - String username, - String password, - @DefaultValue("30d") String expireTime, - @DefaultValue("1") int replication) {} - - /** - * - * @param enabled Whether the TdEngine data store is enabled - * @param url TdEngine connect url - * @param driverClassName tdengine driver, default restful driver - * @param username tdengine username - * @param password tdengine password - * @param tableStrColumnDefineMaxLength auto create table's string column define max length : NCHAR(200) - */ - public record TdEngineProperties( - @DefaultValue("false") boolean enabled, - @DefaultValue("jdbc:TAOS-RS://localhost:6041/demo") String url, - @DefaultValue("com.taosdata.jdbc.rs.RestfulDriver") String driverClassName, - String username, - String password, - @DefaultValue("200") int tableStrColumnDefineMaxLength) {} - - /** - * Victoriametrics configuration information - */ - public record VictoriaMetricsProperties( - @DefaultValue("false") boolean enabled, - 
@DefaultValue("http://localhost:8428") String url, - String username, - String password) {} - - /** - * Redis configuration information - */ - public record RedisProperties( - @DefaultValue("false") boolean enabled, - @DefaultValue("127.0.0.1") String host, - @DefaultValue("6379") Integer port, - String password, - @DefaultValue("0") Integer db) {} - - /** - * IotDB configuration information - * @param enabled Whether the iotDB data store is enabled - * @param host iotDB host - * @param expireTime save data expire time(ms),-1 means it never expires Data storage time (unit: ms,-1 means never expire) - * Note: Why is String used here instead of Long? At present, the set ttl of IoTDB only supports milliseconds as a unit, - * and other units may be added later, so the String type is used for compatibility Data storage time (unit: ms, -1 means never expires) - * Note: Why use String instead of Long here? Currently, IoTDB's set ttl only supports milliseconds as the unit. - * Other units may be added later. In order to be compatible with the future, the String type is used. - */ - public record IotDbProperties( - @DefaultValue("false") boolean enabled, - @DefaultValue("127.0.0.1") String host, - @DefaultValue("6667") Integer rpcPort, - String username, - String password, - List nodeUrls, - ZoneId zoneId, - IotDbVersion version, - long queryTimeoutInMs, - String expireTime) {} - - /** - * GrepTimeDB configuration information - */ - public record GreptimeProperties( - @DefaultValue("false") boolean enabled, - @DefaultValue("127.0.0.1:4001") String endpoint, - String username, - String password) {} - } - -} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/entrance/KafkaProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/entrance/KafkaProperties.java new file mode 100644 index 00000000000..a1580705486 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/entrance/KafkaProperties.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.warehouse.config.entrance; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.ConfigurationPropertiesScan; +import org.springframework.boot.context.properties.bind.DefaultValue; + +/** + * kafka configuration information + */ +@ConfigurationProperties(prefix = "warehouse.entrance.kafka") +@ConfigurationPropertiesScan("org.apache.hertzbeat.warehouse.config") +public record KafkaProperties(@DefaultValue("true") boolean enabled, + @DefaultValue("127.0.0.1:9092") String servers, + String topic, + String groupId) { +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/StoreProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/StoreProperties.java new file mode 100644 index 00000000000..7e7bf275404 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/StoreProperties.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.warehouse.config.store; + +import org.apache.hertzbeat.warehouse.config.store.greptime.GreptimeProperties; +import org.apache.hertzbeat.warehouse.config.store.influxdb.InfluxdbProperties; +import org.apache.hertzbeat.warehouse.config.store.iotdb.IotDbProperties; +import org.apache.hertzbeat.warehouse.config.store.jpa.JpaProperties; +import org.apache.hertzbeat.warehouse.config.store.memory.MemoryProperties; +import org.apache.hertzbeat.warehouse.config.store.redis.RedisProperties; +import org.apache.hertzbeat.warehouse.config.store.tdengine.TdEngineProperties; +import org.apache.hertzbeat.warehouse.config.store.vm.VictoriaMetricsProperties; + +/** + * Scheduling data export configuration properties + * @param jpa use mysql/h2 jpa store metrics history data + * @param memory Memory storage configuration information + * @param influxdb influxdb configuration information + * @param redis redis configuration information + * @param victoriaMetrics VictoriaMetrics Properties + * @param tdEngine TdEngine configuration information + * @param iotDb IoTDB configuration information + * @param greptime GrepTimeDB Config + */ +public record StoreProperties(JpaProperties jpa, MemoryProperties memory, InfluxdbProperties influxdb, + RedisProperties redis, VictoriaMetricsProperties victoriaMetrics, + TdEngineProperties tdEngine, IotDbProperties iotDb, GreptimeProperties greptime) { + +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/greptime/GreptimeProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/greptime/GreptimeProperties.java new file mode 100644 index 00000000000..b7d439d2166 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/greptime/GreptimeProperties.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.warehouse.config.store.greptime; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.bind.DefaultValue; + +/** + * GrepTimeDB configuration information + */ +@ConfigurationProperties(prefix = "warehouse.store.greptime") +public record GreptimeProperties(@DefaultValue("false") boolean enabled, + @DefaultValue("127.0.0.1:4001") String endpoint, + String username, + String password) { +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/influxdb/InfluxdbProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/influxdb/InfluxdbProperties.java new file mode 100644 index 00000000000..3dcbb220784 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/influxdb/InfluxdbProperties.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.warehouse.config.store.influxdb; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.bind.DefaultValue; + +/** + * Influxdb configuration information + */ +@ConfigurationProperties(prefix = "warehouse.store.influxdb") +public record InfluxdbProperties(@DefaultValue("false") boolean enabled, + String serverUrl, + String username, + String password, + @DefaultValue("30d") String expireTime, + @DefaultValue("1") int replication) { +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/iotdb/IotDbProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/iotdb/IotDbProperties.java new file mode 100644 index 00000000000..28b964dbdf5 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/iotdb/IotDbProperties.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.warehouse.config.store.iotdb; + +import java.time.ZoneId; +import java.util.List; +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.bind.DefaultValue; + + + +/** + * IotDB configuration information + * @param enabled Whether the iotDB data store is enabled + * @param host iotDB host + * @param expireTime save data expire time(ms),-1 means it never expires Data storage time (unit: ms,-1 means never expire) + * Note: Why is String used here instead of Long? At present, the set ttl of IoTDB only supports milliseconds as a unit, + * and other units may be added later, so the String type is used for compatibility Data storage time (unit: ms, -1 means never expires) + * Note: Why use String instead of Long here? Currently, IoTDB's set ttl only supports milliseconds as the unit. + * Other units may be added later. In order to be compatible with the future, the String type is used. + */ +@ConfigurationProperties(prefix = "warehouse.store.iot-db") +public record IotDbProperties(@DefaultValue("false") boolean enabled, + @DefaultValue("127.0.0.1") String host, + @DefaultValue("6667") Integer rpcPort, + String username, + String password, + List nodeUrls, + ZoneId zoneId, + IotDbVersion version, + long queryTimeoutInMs, + String expireTime) { +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/IotDbVersion.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/iotdb/IotDbVersion.java similarity index 94% rename from warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/IotDbVersion.java rename to warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/iotdb/IotDbVersion.java index 0a356e809d2..ae51e63b2de 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/IotDbVersion.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/iotdb/IotDbVersion.java @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.hertzbeat.warehouse.config; +package org.apache.hertzbeat.warehouse.config.store.iotdb; /** * IoTDB user version diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/jpa/JpaProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/jpa/JpaProperties.java new file mode 100644 index 00000000000..986b824bf10 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/jpa/JpaProperties.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hertzbeat.warehouse.config.store.jpa; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.bind.DefaultValue; + +/** + * JPA configuration information + * @param enabled use mysql/h2 jpa store metrics history data + * @param expireTime save data expire time(ms) + * @param maxHistoryRecordNum The maximum number of history records retained + */ +@ConfigurationProperties(prefix = "warehouse.store.jpa") +public record JpaProperties(@DefaultValue("true") boolean enabled, + @DefaultValue("1h") String expireTime, + @DefaultValue("20000") Integer maxHistoryRecordNum) { +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/memory/MemoryProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/memory/MemoryProperties.java new file mode 100644 index 00000000000..12003e25071 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/memory/MemoryProperties.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.warehouse.config.store.memory; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.bind.DefaultValue; + +/** + * Memory storage configuration information + * @param enabled Whether memory data storage is enabled + * @param initSize Memory storage map initialization size + */ +@ConfigurationProperties(prefix = "warehouse.store.memory") +public record MemoryProperties(@DefaultValue("true") boolean enabled, + @DefaultValue("1024") Integer initSize) { +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/redis/RedisProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/redis/RedisProperties.java new file mode 100644 index 00000000000..93f2d5ed9b9 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/redis/RedisProperties.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.warehouse.config.store.redis; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.bind.DefaultValue; + +/** + * Redis configuration information + */ +@ConfigurationProperties(prefix = "warehouse.store.redis") +public record RedisProperties(@DefaultValue("false") boolean enabled, + @DefaultValue("127.0.0.1") String host, + @DefaultValue("6379") Integer port, + String password, + @DefaultValue("0") Integer db) { +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/tdengine/TdEngineProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/tdengine/TdEngineProperties.java new file mode 100644 index 00000000000..313a8c239a0 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/tdengine/TdEngineProperties.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.warehouse.config.store.tdengine; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.bind.DefaultValue; + +/** + * + * @param enabled Whether the TdEngine data store is enabled + * @param url TdEngine connect url + * @param driverClassName tdengine driver, default restful driver + * @param username tdengine username + * @param password tdengine password + * @param tableStrColumnDefineMaxLength auto create table's string column define max length : NCHAR(200) + */ +@ConfigurationProperties(prefix = "warehouse.store.td-engine") +public record TdEngineProperties(@DefaultValue("false") boolean enabled, + @DefaultValue("jdbc:TAOS-RS://localhost:6041/demo") String url, + @DefaultValue("com.taosdata.jdbc.rs.RestfulDriver") String driverClassName, + String username, String password, + @DefaultValue("200") int tableStrColumnDefineMaxLength) { +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/vm/VictoriaMetricsProperties.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/vm/VictoriaMetricsProperties.java new file mode 100644 index 00000000000..f6cf555e3c1 --- /dev/null +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/config/store/vm/VictoriaMetricsProperties.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hertzbeat.warehouse.config.store.vm; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.boot.context.properties.bind.DefaultValue; + +/** + * Victoriametrics configuration information + */ +@ConfigurationProperties(prefix = "warehouse.store.victoria-metrics") +public record VictoriaMetricsProperties(@DefaultValue("false") boolean enabled, + @DefaultValue("http://localhost:8428") String url, + String username, + String password) { +} diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryGrepTimeDbDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryGrepTimeDbDataStorage.java index eaea3595222..06c52fa0f80 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryGrepTimeDbDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryGrepTimeDbDataStorage.java @@ -53,7 +53,7 @@ import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.util.JsonUtil; import org.apache.hertzbeat.common.util.TimePeriodUtil; -import org.apache.hertzbeat.warehouse.config.WarehouseProperties; +import org.apache.hertzbeat.warehouse.config.store.greptime.GreptimeProperties; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.stereotype.Component; @@ -83,11 +83,11 @@ public class HistoryGrepTimeDbDataStorage extends AbstractHistoryDataStorage { private static final String DATABASE_NOT_EXIST = "not exist"; private GreptimeDB greptimeDb; - public HistoryGrepTimeDbDataStorage(WarehouseProperties properties) { - this.serverAvailable = this.initDbSession(properties.store().greptime()); + public HistoryGrepTimeDbDataStorage(GreptimeProperties greptimeProperties) { + this.serverAvailable = this.initDbSession(greptimeProperties); } - private boolean initDbSession(WarehouseProperties.StoreProperties.GreptimeProperties properties) { + private boolean initDbSession(GreptimeProperties properties) { String endpoint = properties.endpoint(); GreptimeOptions opts = GreptimeOptions.newBuilder(endpoint) .writeMaxRetries(1) diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryInfluxdbDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryInfluxdbDataStorage.java index d86f9beb5b7..522736c94f4 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryInfluxdbDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryInfluxdbDataStorage.java @@ -40,7 +40,7 @@ import org.apache.hertzbeat.common.entity.dto.Value; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.util.JsonUtil; -import org.apache.hertzbeat.warehouse.config.WarehouseProperties; +import org.apache.hertzbeat.warehouse.config.store.influxdb.InfluxdbProperties; 
import org.apache.http.ssl.SSLContexts; import org.influxdb.InfluxDB; import org.influxdb.InfluxDBFactory; @@ -79,11 +79,11 @@ public class HistoryInfluxdbDataStorage extends AbstractHistoryDataStorage { private InfluxDB influxDb; - public HistoryInfluxdbDataStorage(WarehouseProperties properties) { - this.initInfluxDb(properties); + public HistoryInfluxdbDataStorage(InfluxdbProperties influxdbProperties) { + this.initInfluxDb(influxdbProperties); } - public void initInfluxDb(WarehouseProperties properties) { + public void initInfluxDb(InfluxdbProperties influxdbProperties) { OkHttpClient.Builder client = new OkHttpClient.Builder() .connectTimeout(10, TimeUnit.SECONDS) .writeTimeout(10, TimeUnit.SECONDS) @@ -93,7 +93,6 @@ public void initInfluxDb(WarehouseProperties properties) { client.sslSocketFactory(defaultSslSocketFactory(), defaultTrustManager()); client.hostnameVerifier(noopHostnameVerifier()); - WarehouseProperties.StoreProperties.InfluxdbProperties influxdbProperties = properties.store().influxdb(); this.influxDb = InfluxDBFactory.connect(influxdbProperties.serverUrl(), influxdbProperties.username(), influxdbProperties.password(), client); // Close it if your application is terminating, or you are not using it anymore. Runtime.getRuntime().addShutdownHook(new Thread(influxDb::close)); @@ -101,7 +100,7 @@ public void initInfluxDb(WarehouseProperties properties) { this.serverAvailable = this.createDatabase(influxdbProperties); } - private boolean createDatabase(WarehouseProperties.StoreProperties.InfluxdbProperties influxdbProperties) { + private boolean createDatabase(InfluxdbProperties influxdbProperties) { QueryResult queryResult = this.influxDb.query(new Query(SHOW_DATABASE)); if (queryResult.hasError()) { diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryIotDbDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryIotDbDataStorage.java index ab928e37b8e..39a9358eb59 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryIotDbDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryIotDbDataStorage.java @@ -29,8 +29,8 @@ import org.apache.hertzbeat.common.entity.dto.Value; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.util.JsonUtil; -import org.apache.hertzbeat.warehouse.config.IotDbVersion; -import org.apache.hertzbeat.warehouse.config.WarehouseProperties; +import org.apache.hertzbeat.warehouse.config.store.iotdb.IotDbProperties; +import org.apache.hertzbeat.warehouse.config.store.iotdb.IotDbVersion; import org.apache.iotdb.rpc.IoTDBConnectionException; import org.apache.iotdb.rpc.StatementExecutionException; import org.apache.iotdb.session.pool.SessionDataSetWrapper; @@ -87,11 +87,11 @@ public class HistoryIotDbDataStorage extends AbstractHistoryDataStorage { private long queryTimeoutInMs; - public HistoryIotDbDataStorage(WarehouseProperties properties) { - this.serverAvailable = this.initIotDbSession(properties.store().iotDb()); + public HistoryIotDbDataStorage(IotDbProperties iotDbProperties) { + this.serverAvailable = this.initIotDbSession(iotDbProperties); } - private boolean initIotDbSession(WarehouseProperties.StoreProperties.IotDbProperties properties) { + private boolean initIotDbSession(IotDbProperties properties) { SessionPool.Builder builder = new SessionPool.Builder(); builder.host(properties.host()); if (properties.rpcPort() != null) { diff --git 
a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryJpaDatabaseDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryJpaDatabaseDataStorage.java index efe9c7e4caf..24c9847e763 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryJpaDatabaseDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryJpaDatabaseDataStorage.java @@ -42,7 +42,7 @@ import org.apache.hertzbeat.common.entity.warehouse.History; import org.apache.hertzbeat.common.util.JsonUtil; import org.apache.hertzbeat.common.util.TimePeriodUtil; -import org.apache.hertzbeat.warehouse.config.WarehouseProperties; +import org.apache.hertzbeat.warehouse.config.store.jpa.JpaProperties; import org.apache.hertzbeat.warehouse.dao.HistoryDao; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.data.domain.Sort; @@ -58,13 +58,13 @@ @Slf4j public class HistoryJpaDatabaseDataStorage extends AbstractHistoryDataStorage { private final HistoryDao historyDao; - private final WarehouseProperties.StoreProperties.JpaProperties jpaProperties; + private final JpaProperties jpaProperties; private static final int STRING_MAX_LENGTH = 1024; - public HistoryJpaDatabaseDataStorage(WarehouseProperties properties, + public HistoryJpaDatabaseDataStorage(JpaProperties jpaProperties, HistoryDao historyDao) { - this.jpaProperties = properties.store().jpa(); + this.jpaProperties = jpaProperties; this.serverAvailable = true; this.historyDao = historyDao; expiredDataCleaner(); diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryTdEngineDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryTdEngineDataStorage.java index 490377da7c0..d760f3a17b7 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryTdEngineDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryTdEngineDataStorage.java @@ -37,7 +37,7 @@ import org.apache.hertzbeat.common.entity.dto.Value; import org.apache.hertzbeat.common.entity.message.CollectRep; import org.apache.hertzbeat.common.util.JsonUtil; -import org.apache.hertzbeat.warehouse.config.WarehouseProperties; +import org.apache.hertzbeat.warehouse.config.store.tdengine.TdEngineProperties; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Primary; import org.springframework.stereotype.Component; @@ -71,16 +71,16 @@ public class HistoryTdEngineDataStorage extends AbstractHistoryDataStorage { private HikariDataSource hikariDataSource; private final int tableStrColumnDefineMaxLength; - public HistoryTdEngineDataStorage(WarehouseProperties properties) { - if (properties == null || properties.store() == null || properties.store().tdEngine() == null) { + public HistoryTdEngineDataStorage(TdEngineProperties tdEngineProperties) { + if (tdEngineProperties == null) { log.error("init error, please config Warehouse TdEngine props in application.yml"); throw new IllegalArgumentException("please config Warehouse TdEngine props"); } - tableStrColumnDefineMaxLength = properties.store().tdEngine().tableStrColumnDefineMaxLength(); - serverAvailable = initTdEngineDatasource(properties.store().tdEngine()); + tableStrColumnDefineMaxLength = tdEngineProperties.tableStrColumnDefineMaxLength(); + serverAvailable = initTdEngineDatasource(tdEngineProperties); } - private boolean 
initTdEngineDatasource(WarehouseProperties.StoreProperties.TdEngineProperties tdEngineProperties) { + private boolean initTdEngineDatasource(TdEngineProperties tdEngineProperties) { HikariConfig config = new HikariConfig(); // jdbc properties config.setJdbcUrl(tdEngineProperties.url()); diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryVictoriaMetricsDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryVictoriaMetricsDataStorage.java index 4c27cf868f9..b62f7cc1b56 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryVictoriaMetricsDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/HistoryVictoriaMetricsDataStorage.java @@ -45,7 +45,7 @@ import org.apache.hertzbeat.common.util.CommonUtil; import org.apache.hertzbeat.common.util.JsonUtil; import org.apache.hertzbeat.common.util.TimePeriodUtil; -import org.apache.hertzbeat.warehouse.config.WarehouseProperties; +import org.apache.hertzbeat.warehouse.config.store.vm.VictoriaMetricsProperties; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Primary; import org.springframework.http.HttpEntity; @@ -87,17 +87,17 @@ public class HistoryVictoriaMetricsDataStorage extends AbstractHistoryDataStorag private static final String MONITOR_METRICS_KEY = "__metrics__"; private static final String MONITOR_METRIC_KEY = "__metric__"; - private final WarehouseProperties.StoreProperties.VictoriaMetricsProperties victoriaMetricsProp; + private final VictoriaMetricsProperties victoriaMetricsProp; private final RestTemplate restTemplate; - public HistoryVictoriaMetricsDataStorage(WarehouseProperties properties, RestTemplate restTemplate) { - if (properties == null || properties.store() == null || properties.store().victoriaMetrics() == null) { + public HistoryVictoriaMetricsDataStorage(VictoriaMetricsProperties victoriaMetricsProperties, RestTemplate restTemplate) { + if (victoriaMetricsProperties == null) { log.error("init error, please config Warehouse victoriaMetrics props in application.yml"); throw new IllegalArgumentException("please config Warehouse victoriaMetrics props"); } this.restTemplate = restTemplate; - victoriaMetricsProp = properties.store().victoriaMetrics(); + victoriaMetricsProp = victoriaMetricsProperties; serverAvailable = checkVictoriaMetricsDatasourceAvailable(); } diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/RealTimeMemoryDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/RealTimeMemoryDataStorage.java index 94beb837c83..efcf6fe6175 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/RealTimeMemoryDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/RealTimeMemoryDataStorage.java @@ -23,7 +23,7 @@ import java.util.concurrent.ConcurrentHashMap; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.entity.message.CollectRep; -import org.apache.hertzbeat.warehouse.config.WarehouseProperties; +import org.apache.hertzbeat.warehouse.config.store.memory.MemoryProperties; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.lang.NonNull; import org.springframework.stereotype.Component; @@ -44,11 +44,11 @@ public class RealTimeMemoryDataStorage extends AbstractRealTimeDataStorage { private static final Integer DEFAULT_INIT_SIZE = 16; private static final Integer METRICS_SIZE = 
8; - public RealTimeMemoryDataStorage(WarehouseProperties properties) { + public RealTimeMemoryDataStorage(MemoryProperties memoryProperties) { int initSize = DEFAULT_INIT_SIZE; - if (properties != null && properties.store() != null && properties.store().memory() != null - && properties.store().memory().initSize() != null) { - initSize = properties.store().memory().initSize(); + if (memoryProperties != null + && memoryProperties.initSize() != null) { + initSize = memoryProperties.initSize(); } monitorMetricsDataMap = new ConcurrentHashMap<>(initSize); this.serverAvailable = true; diff --git a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/RealTimeRedisDataStorage.java b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/RealTimeRedisDataStorage.java index 5d8c1459b1e..e5f25ad10fc 100644 --- a/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/RealTimeRedisDataStorage.java +++ b/warehouse/src/main/java/org/apache/hertzbeat/warehouse/store/RealTimeRedisDataStorage.java @@ -29,7 +29,7 @@ import java.util.Map; import lombok.extern.slf4j.Slf4j; import org.apache.hertzbeat.common.entity.message.CollectRep; -import org.apache.hertzbeat.warehouse.config.WarehouseProperties; +import org.apache.hertzbeat.warehouse.config.store.redis.RedisProperties; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Primary; import org.springframework.lang.NonNull; @@ -49,13 +49,13 @@ public class RealTimeRedisDataStorage extends AbstractRealTimeDataStorage { private final Integer db; private StatefulRedisConnection connection; - public RealTimeRedisDataStorage(WarehouseProperties properties) { - this.serverAvailable = initRedisClient(properties); - this.db = getRedisSelectDb(properties); + public RealTimeRedisDataStorage(RedisProperties redisProperties) { + this.serverAvailable = initRedisClient(redisProperties); + this.db = getRedisSelectDb(redisProperties); } - private Integer getRedisSelectDb(WarehouseProperties properties){ - return properties.store().redis().db(); + private Integer getRedisSelectDb(RedisProperties redisProperties){ + return redisProperties.db(); } @Override @@ -95,12 +95,12 @@ public void saveData(CollectRep.MetricsData metricsData) { }); } - private boolean initRedisClient(WarehouseProperties properties) { - if (properties == null || properties.store() == null || properties.store().redis() == null) { + private boolean initRedisClient(RedisProperties redisProperties) { + if (redisProperties == null) { log.error("init error, please config Warehouse redis props in application.yml"); return false; } - WarehouseProperties.StoreProperties.RedisProperties redisProp = properties.store().redis(); + RedisProperties redisProp = redisProperties; RedisURI.Builder uriBuilder = RedisURI.builder() .withHost(redisProp.host()) .withPort(redisProp.port())
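

Below is a minimal sketch, not part of the patch series above, of how a warehouse component can consume one of the new per-store records after this split. The class name ExampleRedisBackedStorage is hypothetical; the record accessors and the warehouse.store.redis prefix follow the RedisProperties record introduced in patch 7, and the sketch assumes configuration-properties scanning (or an equivalent @EnableConfigurationProperties declaration) is active so Spring Boot binds warehouse.store.redis.* and can inject the record.

package org.apache.hertzbeat.warehouse.store;

import lombok.extern.slf4j.Slf4j;
import org.apache.hertzbeat.warehouse.config.store.redis.RedisProperties;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.stereotype.Component;

/**
 * Hypothetical example component, only to illustrate the refactored injection style:
 * it depends directly on the dedicated RedisProperties record instead of the former
 * WarehouseProperties aggregate.
 */
@Slf4j
@Component
@ConditionalOnProperty(prefix = "warehouse.store.redis", name = "enabled", havingValue = "true")
public class ExampleRedisBackedStorage {

    private final RedisProperties redisProperties;

    // Spring Boot binds warehouse.store.redis.* onto the record (assuming
    // configuration-properties scanning is enabled) and injects it here.
    public ExampleRedisBackedStorage(RedisProperties redisProperties) {
        this.redisProperties = redisProperties;
    }

    public String describeTarget() {
        // Record accessors replace the old properties.store().redis() call chain.
        return redisProperties.host() + ":" + redisProperties.port() + " db=" + redisProperties.db();
    }
}

Compared with the former nested WarehouseProperties.store().redis() chain, each storage implementation now depends only on the record it actually uses, which is the same change the constructor updates in HistoryJpaDatabaseDataStorage, HistoryTdEngineDataStorage and RealTimeRedisDataStorage above make.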