
Commit

#66 adds more metrics for spool stats
GreenRover committed Feb 19, 2024
1 parent a29303a commit 1430caa
Showing 5 changed files with 118 additions and 1 deletion.
1 change: 1 addition & 0 deletions README.md
@@ -117,6 +117,7 @@ not starting with the word "internal"
| Memory | no | no | no | dont harm broker | show memory | software, appliance |
| Interface | no | yes | no | dont harm broker | show interface interfaceFilter | software, appliance |
| GlobalStats | no | no | no | dont harm broker | show stats client | software, appliance |
| GlobalSystemInfo | no | no | no | dont harm broker | show system | software, appliance |
| Spool | no | no | no | dont harm broker | show message-spool | software, appliance |
| Redundancy (only for HA broker) | no | no | no | dont harm broker | show redundancy | software, appliance |
| ConfigSync (only for HA broker) | no | no | no | dont harm broker | show config-sync | software, appliance |
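
For context, the new GlobalSystemInfo data source issues a plain SEMPv1 "show system" request against the broker's /SEMP endpoint; in the collector below this goes through e.postHTTP. A minimal standalone sketch of that request follows; the broker URL, port, and credentials are placeholders and not part of this commit:

// Standalone sketch: POST the SEMPv1 "show system" request used by the new
// GlobalSystemInfo data source. Broker URL and credentials are placeholders.
package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

func main() {
    const command = "<rpc><show><system/></show></rpc>"

    req, err := http.NewRequest(http.MethodPost,
        "http://localhost:8080/SEMP", strings.NewReader(command))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/xml")
    req.SetBasicAuth("admin", "admin") // placeholder credentials

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    reply, _ := io.ReadAll(resp.Body)
    fmt.Println(string(reply)) // raw XML reply; see the decoding sketch further down
}
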
2 changes: 2 additions & 0 deletions exporter/exporter.collect.go
@@ -32,6 +32,8 @@ func (e *Exporter) CollectPrometheusMetric(ch chan<- semp.PrometheusMetric) {
up, err = e.semp.GetInterfaceSemp1(ch, dataSource.ItemFilter)
case "GlobalStats", "GlobalStatsV1":
up, err = e.semp.GetGlobalStatsSemp1(ch)
case "GlobalSystemInfo", "GlobalSystemInfoV1":
up, err = e.semp.GetGlobalSystemInfoSemp1(ch)
case "Spool", "SpoolV1":
up, err = e.semp.GetSpoolSemp1(ch)
case "Redundancy", "RedundancyV1":
46 changes: 46 additions & 0 deletions semp/getGlobalStatsSemp1.go
@@ -8,6 +8,52 @@ import (
)

// GetGlobalSystemInfoSemp1 collects global system information (SEMPv1: show system).
func (e *Semp) GetGlobalSystemInfoSemp1(ch chan<- PrometheusMetric) (ok float64, err error) {
type Data struct {
RPC struct {
Show struct {
System struct {
UptimeSeconds float64 `xml:"system-uptime-seconds"`
ConnectionsQuota float64 `xml:"max-connections"`
MessagesQueueQuota float64 `xml:"max-queue-messages"`
CpuCores float64 `xml:"cpu-cores"`
SystemMemory float64 `xml:"system-memory"`
} `xml:"system"`
} `xml:"show"`
} `xml:"rpc"`
ExecuteResult struct {
Result string `xml:"code,attr"`
} `xml:"execute-result"`
}

command := "<rpc><show><system/></show></rpc>"
body, err := e.postHTTP(e.brokerURI+"/SEMP", "application/xml", command, "GetGlobalSystemInfoSemp1", 1)
if err != nil {
_ = level.Error(e.logger).Log("msg", "Can't scrape GetGlobalSystemInfoSemp1", "err", err, "broker", e.brokerURI)
return 0, err
}
defer body.Close()
decoder := xml.NewDecoder(body)
var target Data
err = decoder.Decode(&target)
if err != nil {
_ = level.Error(e.logger).Log("msg", "Can't decode Xml GetGlobalSystemInfoSemp1", "err", err, "broker", e.brokerURI)
return 0, err
}
if target.ExecuteResult.Result != "ok" {
_ = level.Error(e.logger).Log("msg", "unexpected result", "command", command, "result", target.ExecuteResult.Result, "broker", e.brokerURI)
return 0, errors.New("unexpected result: see log")
}

ch <- e.NewMetric(MetricDesc["GlobalStats"]["system_uptime_seconds"], prometheus.GaugeValue, target.RPC.Show.System.UptimeSeconds)
ch <- e.NewMetric(MetricDesc["GlobalStats"]["system_total_clients_quota"], prometheus.GaugeValue, target.RPC.Show.System.ConnectionsQuota)
ch <- e.NewMetric(MetricDesc["GlobalStats"]["system_message_spool_quota"], prometheus.GaugeValue, target.RPC.Show.System.MessagesQueueQuota*1000000)
ch <- e.NewMetric(MetricDesc["GlobalStats"]["system_cpu_cores"], prometheus.GaugeValue, target.RPC.Show.System.CpuCores)
ch <- e.NewMetric(MetricDesc["GlobalStats"]["system_memory_bytes"], prometheus.GaugeValue, target.RPC.Show.System.SystemMemory*1073741824.0)

return 1, nil
}

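// GetGlobalStatsSemp1 collects global client statistics (SEMPv1: show stats client).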
func (e *Semp) GetGlobalStatsSemp1(ch chan<- PrometheusMetric) (ok float64, err error) {
type Data struct {
RPC struct {
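
The decoder above expects a reply shaped like its struct tags. A minimal standalone sketch of that mapping; the sample values and the rpc-reply root element name are assumptions (encoding/xml matches struct fields by child element name and skips everything else):

// Standalone sketch: decode a SEMPv1 "show system" style reply into the same
// struct shape used by GetGlobalSystemInfoSemp1. Sample values are invented.
package main

import (
    "encoding/xml"
    "fmt"
    "strings"
)

type systemInfo struct {
    RPC struct {
        Show struct {
            System struct {
                UptimeSeconds      float64 `xml:"system-uptime-seconds"`
                ConnectionsQuota   float64 `xml:"max-connections"`
                MessagesQueueQuota float64 `xml:"max-queue-messages"`
                CpuCores           float64 `xml:"cpu-cores"`
                SystemMemory       float64 `xml:"system-memory"`
            } `xml:"system"`
        } `xml:"show"`
    } `xml:"rpc"`
    ExecuteResult struct {
        Result string `xml:"code,attr"`
    } `xml:"execute-result"`
}

const sample = `<rpc-reply>
  <rpc><show><system>
    <system-uptime-seconds>86400</system-uptime-seconds>
    <max-connections>1000</max-connections>
    <max-queue-messages>240</max-queue-messages>
    <cpu-cores>8</cpu-cores>
    <system-memory>64</system-memory>
  </system></show></rpc>
  <execute-result code="ok"/>
</rpc-reply>`

func main() {
    var target systemInfo
    if err := xml.NewDecoder(strings.NewReader(sample)).Decode(&target); err != nil {
        panic(err)
    }
    fmt.Println(target.ExecuteResult.Result) // ok
    // The collector scales these values before exporting them:
    // max-queue-messages by 1e6 and system-memory by 2^30.
    fmt.Println(target.RPC.Show.System.MessagesQueueQuota * 1000000)  // 2.4e+08
    fmt.Println(target.RPC.Show.System.SystemMemory * 1073741824.0)   // 6.8719476736e+10
}
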
46 changes: 45 additions & 1 deletion semp/getSpoolSemp1.go
@@ -25,6 +25,28 @@ func (e *Semp) GetSpoolSemp1(ch chan<- PrometheusMetric) (ok float64, err error)
ActiveDiskPartitionUsage string `xml:"active-disk-partition-usage"` // May be "-"
MateDiskPartitionUsage string `xml:"mate-disk-partition-usage"` // May be "-"
SpoolFilesUtilizationPercentage string `xml:"spool-files-utilization-percentage"` // May be "-"
SpoolSyncStatus string `xml:"spool-sync-status"`

IngressFlowsQuota float64 `xml:"ingress-flows-allowed"`
IngressFlowsCount float64 `xml:"ingress-flow-count"`
EgressFlowsQuota float64 `xml:"flows-allowed"`
EgressFlowsActive float64 `xml:"active-flow-count"`
EgressFlowsInactive float64 `xml:"inactive-flow-count"`
EgressFlowsBrowser float64 `xml:"browser-flow-count"`

EntitiesByQendptQuota float64 `xml:"message-spool-entities-allowed-by-qendpt"`
EntitiesByQendptQueue float64 `xml:"message-spool-entities-used-by-queue"`
EntitiesByQendptDte float64 `xml:"message-spool-entities-used-by-dte"`

TransactedSessionsQuota float64 `xml:"max-transacted-sessions"`
TransactedSessionsUsed float64 `xml:"transacted-sessions-used"`

TransactionsQuota float64 `xml:"max-transactions"`
TransactionsUsed float64 `xml:"transactions-used"`

CurrentPersistentStoreUsageADB float64 `xml:"current-rfad-usage"`
MessagesCurrentlySpooledADB float64 `xml:"rfad-messages-currently-spooled"`
MessagesCurrentlySpooledDisk float64 `xml:"disk-messages-currently-spooled"`
} `xml:"message-spool-info"`
} `xml:"message-spool"`
} `xml:"show"`
@@ -34,7 +56,7 @@ func (e *Semp) GetSpoolSemp1(ch chan<- PrometheusMetric) (ok float64, err error)
} `xml:"execute-result"`
}

command := "<rpc><show><message-spool></message-spool></show ></rpc>"
command := "<rpc><show><message-spool><detail/></message-spool></show ></rpc>"
body, err := e.postHTTP(e.brokerURI+"/SEMP", "application/xml", command, "SpoolSemp1", 1)
if err != nil {
_ = level.Error(e.logger).Log("msg", "Can't scrape Solace", "err", err, "broker", e.brokerURI)
@@ -74,7 +96,29 @@ func (e *Semp) GetSpoolSemp1(ch chan<- PrometheusMetric) (ok float64, err error)
}

ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_usage_bytes"], prometheus.GaugeValue, math.Round(target.RPC.Show.Spool.Info.PersistUsage*1048576.0))
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_usage_adb_bytes"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.CurrentPersistentStoreUsageADB*1048576.0)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_usage_msgs"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.PersistMsgCount)

ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_ingress_flows_quota"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.IngressFlowsQuota)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_ingress_flows_count"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.IngressFlowsCount)

ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_egress_flows_quota"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.EgressFlowsQuota)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_egress_flows_count"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.EgressFlowsActive+target.RPC.Show.Spool.Info.EgressFlowsInactive+target.RPC.Show.Spool.Info.EgressFlowsBrowser)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_egress_flows_active"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.EgressFlowsActive)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_egress_flows_inactive"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.EgressFlowsInactive)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_egress_flows_browser"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.EgressFlowsBrowser)

ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_endpoints_quota"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.EntitiesByQendptQuota)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_endpoints_queue"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.EntitiesByQendptQueue)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_endpoints_dte"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.EntitiesByQendptDte)

ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_transacted_sessions_quota"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.TransactedSessionsQuota)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_transacted_sessions_used"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.TransactedSessionsUsed)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_transactions_quota"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.TransactionsQuota)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_transactions_used"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.TransactionsUsed)

ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_messages_currently_spooled_adb"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.MessagesCurrentlySpooledADB)
ch <- e.NewMetric(MetricDesc["Spool"]["system_spool_messages_currently_spooled_disk"], prometheus.GaugeValue, target.RPC.Show.Spool.Info.MessagesCurrentlySpooledDisk)

return 1, nil
}
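
Several of the spool fields above are declared as strings because the broker may report "-" instead of a number (noted in the struct comments). A hypothetical guard for such fields, shown only as a sketch; the exporter's own conversion is handled elsewhere in this file (collapsed in this diff) and may differ:

// Hypothetical helper (not part of the commit): convert a SEMPv1 value that
// may be the "-" placeholder into a float64 plus a validity flag.
package main

import (
    "fmt"
    "strconv"
)

func parseSempFloat(s string) (float64, bool) {
    if s == "-" || s == "" {
        return 0, false // value not available on this broker/platform
    }
    v, err := strconv.ParseFloat(s, 64)
    if err != nil {
        return 0, false
    }
    return v, true
}

func main() {
    for _, raw := range []string{"42.5", "-"} {
        v, ok := parseSempFloat(raw)
        fmt.Println(raw, v, ok) // only "42.5" yields a usable value
    }
}
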
24 changes: 24 additions & 0 deletions semp/metricDesc.go
@@ -92,6 +92,11 @@ var MetricDesc = map[string]Descriptions{
//SEMPv1: show stats client
"GlobalStats": {
"system_total_clients_connected": NewSemDesc("system_total_clients_connected", NoSempV2Ready, "Total clients connected.", nil),
"system_total_clients_quota": NewSemDesc("system_total_clients_quota", NoSempV2Ready, "Maximum number of clients that can be connected.", nil),
"system_message_spool_quota": NewSemDesc("system_message_spool_quota", NoSempV2Ready, "Maximum number of messages that can be queued.", nil),
"system_uptime_seconds": NewSemDesc("system_uptime_seconds", NoSempV2Ready, "Uptime in seconds.", nil),
"system_cpu_cores": NewSemDesc("system_cpu_cores", NoSempV2Ready, "Available CPU cores.", nil),
"system_memory_bytes": NewSemDesc("system_memory_bytes", NoSempV2Ready, "Available RAM in bytes.", nil),
"system_rx_msgs_total": NewSemDesc("system_rx_msgs_total", NoSempV2Ready, "Total client messages received.", nil),
"system_tx_msgs_total": NewSemDesc("system_tx_msgs_total", NoSempV2Ready, "Total client messages sent.", nil),
"system_rx_bytes_total": NewSemDesc("system_rx_bytes_total", NoSempV2Ready, "Total client bytes received.", nil),
@@ -108,6 +113,25 @@ var MetricDesc = map[string]Descriptions{
"system_spool_usage_msgs": NewSemDesc("system_spool_usage_msgs", NoSempV2Ready, "Spool total number of persisted messages.", nil),
"system_spool_files_utilization_percent": NewSemDesc("system_spool_files_utilization_percent", NoSempV2Ready, "Utilization of spool files in percent.", nil),
"system_spool_message_count_utilization_percent": NewSemDesc("system_spool_message_count_utilization_percent", NoSempV2Ready, "Utilization of queue message resource in percent.", nil),

"system_spool_ingress_flows_quota": NewSemDesc("system_spool_ingress_flows_quota", NoSempV2Ready, "Maximum number of ingress flows.", nil),
"system_spool_ingress_flows_count": NewSemDesc("system_spool_ingress_flows_count", NoSempV2Ready, "Number of used ingress flows.", nil),
"system_spool_egress_flows_quota": NewSemDesc("system_spool_egress_flows_quota", NoSempV2Ready, "Maximum number of egress flows.", nil),
"system_spool_egress_flows_count": NewSemDesc("system_spool_egress_flows_count", NoSempV2Ready, "Number of used egress flows.", nil),
"system_spool_egress_flows_active": NewSemDesc("system_spool_egress_flows_active", NoSempV2Ready, "Number of used egress flows in the active state.", nil),
"system_spool_egress_flows_inactive": NewSemDesc("system_spool_egress_flows_inactive", NoSempV2Ready, "Number of used egress flows in the inactive state.", nil),
"system_spool_egress_flows_browser": NewSemDesc("system_spool_egress_flows_browser", NoSempV2Ready, "Number of used egress flows in queue browser mode.", nil),
"system_spool_endpoints_quota": NewSemDesc("system_spool_endpoints_quota", NoSempV2Ready, "Maximum number of queue or topic endpoints.", nil),
"system_spool_endpoints_queue": NewSemDesc("system_spool_endpoints_queue", NoSempV2Ready, "Number of existing queue endpoints.", nil),
"system_spool_endpoints_dte": NewSemDesc("system_spool_endpoints_dte", NoSempV2Ready, "Number of existing durable topic endpoints.", nil),
"system_spool_transacted_sessions_quota": NewSemDesc("system_spool_transacted_sessions_quota", NoSempV2Ready, "Maximum number of transacted sessions.", nil),
"system_spool_transacted_sessions_used": NewSemDesc("system_spool_transacted_sessions_used", NoSempV2Ready, "Number of used transacted sessions.", nil),
"system_spool_transactions_quota": NewSemDesc("system_spool_transactions_quota", NoSempV2Ready, "Maximum number of transactions.", nil),
"system_spool_transactions_used": NewSemDesc("system_spool_transactions_used", NoSempV2Ready, "Number of used transactions.", nil),

"system_spool_usage_adb_bytes": NewSemDesc("system_spool_usage_adb_bytes", NoSempV2Ready, "Spool total persisted usage in the ADB, in bytes.", nil),
"system_spool_messages_currently_spooled_adb": NewSemDesc("system_spool_messages_currently_spooled_adb", NoSempV2Ready, "Messages currently spooled in the ADB.", nil),
"system_spool_messages_currently_spooled_disk": NewSemDesc("system_spool_messages_currently_spooled_disk", NoSempV2Ready, "Messages currently spooled on disk.", nil),
},
"Redundancy": {
"system_redundancy_up": NewSemDesc("system_redundancy_up", NoSempV2Ready, "Is redundancy up? (0=Down, 1=Up).", variableLabelsRedundancy),
