diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml new file mode 100644 index 00000000..c545dc7d --- /dev/null +++ b/.github/workflows/golangci-lint.yml @@ -0,0 +1,30 @@ +# This action is synced from https://github.com/prometheus/prometheus +name: golangci-lint +on: + push: + paths: + - "go.sum" + - "go.mod" + - "**.go" + - "scripts/errcheck_excludes.txt" + - ".github/workflows/golangci-lint.yml" + - ".golangci.yml" + pull_request: + +jobs: + golangci: + name: lint + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Install Go + uses: actions/setup-go@v3 + with: + go-version: 1.20.x + + - name: Lint + uses: golangci/golangci-lint-action@v3 + with: + version: v1.54.2 diff --git a/.gitignore b/.gitignore index 3b735ec4..c8594976 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,12 @@ # Go workspace file go.work + +# Ignore fixtures +collector/fixtures/sys + +# Ignore binary +**/*batchjob_exporter + +# Ignore scripts +run.sh diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..3f7e4589 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,33 @@ +linters: + enable: + - misspell + - revive + disable: + # Disable soon to deprecated[1] linters that lead to false + # positives when build tags disable certain files[2] + # 1: https://github.com/golangci/golangci-lint/issues/1841 + # 2: https://github.com/prometheus/node_exporter/issues/1545 + - deadcode + - unused + - structcheck + - varcheck + +issues: + exclude-rules: + - path: _test.go + linters: + - errcheck + +linters-settings: + errcheck: + exclude-functions: + # Used in HTTP handlers, any error is handled by the server itself. + - (net/http.ResponseWriter).Write + # Never check for logger errors. + - (github.com/go-kit/log.Logger).Log + revive: + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + severity: warning + disabled: true diff --git a/.promu-cgo.yml b/.promu-cgo.yml new file mode 100644 index 00000000..5bf8c819 --- /dev/null +++ b/.promu-cgo.yml @@ -0,0 +1,27 @@ +go: + # Whenever the Go version is updated here, .circle/config.yml and + # .promu.yml should also be updated. + version: 1.21 + cgo: true +repository: + path: github.com/mahendrapaipuri/batchjob_exporter +build: + binaries: + - name: batchjob_exporter + flags: -a -tags 'netgo osusergo static_build' + ldflags: | + -X github.com/prometheus/common/version.Version={{.Version}} + -X github.com/prometheus/common/version.Revision={{.Revision}} + -X github.com/prometheus/common/version.Branch={{.Branch}} + -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} + -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} +tarball: + files: + - LICENSE + - NOTICE +crossbuild: + platforms: + - darwin/amd64 + - darwin/arm64 + - netbsd/amd64 + - netbsd/386 diff --git a/.promu.yml b/.promu.yml new file mode 100644 index 00000000..275cfa86 --- /dev/null +++ b/.promu.yml @@ -0,0 +1,24 @@ +go: + # Whenever the Go version is updated here, .circle/config.yml and + # .promu-cgo.yml should also be updated. 
+ version: 1.21 +repository: + path: github.com/mahendrapaipuri/batchjob_exporter +build: + binaries: + - name: batchjob_exporter + flags: -a -tags 'netgo osusergo static_build' + ldflags: | + -X github.com/prometheus/common/version.Version={{.Version}} + -X github.com/prometheus/common/version.Revision={{.Revision}} + -X github.com/prometheus/common/version.Branch={{.Branch}} + -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}} + -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}} +tarball: + files: + - LICENSE + - NOTICE +crossbuild: + platforms: + - linux + - openbsd/amd64 diff --git a/.yamllint b/.yamllint new file mode 100644 index 00000000..955a5a62 --- /dev/null +++ b/.yamllint @@ -0,0 +1,23 @@ +--- +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + commas: disable + comments: disable + comments-indentation: disable + document-start: disable + indentation: + spaces: consistent + indent-sequences: consistent + key-duplicates: + ignore: | + config/testdata/section_key_dup.bad.yml + line-length: disable + truthy: + check-keys: false diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..43021db1 --- /dev/null +++ b/Makefile @@ -0,0 +1,134 @@ +# Nicked from node_exporter repo and modified to current exporter + +# Ensure that 'all' is the default target otherwise it will be the first target from Makefile.common. +all:: + +# Needs to be defined before including Makefile.common to auto-generate targets +DOCKER_ARCHS ?= amd64 armv7 arm64 ppc64le s390x + +include Makefile.common + +PROMTOOL_VERSION ?= 2.30.0 +PROMTOOL_URL ?= https://github.com/prometheus/prometheus/releases/download/v$(PROMTOOL_VERSION)/prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM).tar.gz +PROMTOOL ?= $(FIRST_GOPATH)/bin/promtool + +DOCKER_IMAGE_NAME ?= batchjob-exporter +MACH ?= $(shell uname -m) + +STATICCHECK_IGNORE = + +ifeq ($(GOHOSTOS), linux) + test-e2e := test-e2e +else + test-e2e := skip-test-e2e +endif + +# Use CGO for non-Linux builds. +ifeq ($(GOOS), linux) + PROMU_CONF ?= .promu.yml +else + ifndef GOOS + ifeq ($(GOHOSTOS), linux) + PROMU_CONF ?= .promu.yml + else + PROMU_CONF ?= .promu-cgo.yml + endif + else + # Do not use CGO for openbsd/amd64 builds + ifeq ($(GOOS), openbsd) + ifeq ($(GOARCH), amd64) + PROMU_CONF ?= .promu.yml + else + PROMU_CONF ?= .promu-cgo.yml + endif + else + PROMU_CONF ?= .promu-cgo.yml + endif + endif +endif + +PROMU := $(FIRST_GOPATH)/bin/promu --config $(PROMU_CONF) + +e2e-out-64k-page = collector/fixtures/e2e-64k-page-output.txt +e2e-out = collector/fixtures/e2e-output.txt +ifeq ($(MACH), ppc64le) + e2e-out = $(e2e-out-64k-page) +endif +ifeq ($(MACH), aarch64) + e2e-out = $(e2e-out-64k-page) +endif + +# 64bit -> 32bit mapping for cross-checking. At least for amd64/386, the 64bit CPU can execute 32bit code but not the other way around, so we don't support cross-testing upwards. +cross-test = skip-test-32bit +define goarch_pair + ifeq ($$(GOHOSTOS),linux) + ifeq ($$(GOHOSTARCH),$1) + GOARCH_CROSS = $2 + cross-test = test-32bit + endif + endif +endef + +# By default, "cross" test with ourselves to cover unknown pairings. 
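+# On a linux/amd64 host, for example, the (amd64,386) pair below sets
+# GOARCH_CROSS=386 and switches cross-test from skip-test-32bit to test-32bit.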
+$(eval $(call goarch_pair,amd64,386)) +$(eval $(call goarch_pair,mips64,mips)) +$(eval $(call goarch_pair,mips64el,mipsel)) + +all:: vet checkmetrics checkrules common-all $(cross-test) $(test-e2e) + +.PHONY: test +test: collector/fixtures/sys/.unpacked + @echo ">> running tests" + $(GO) test -short $(test-flags) $(pkgs) + +.PHONY: test-32bit +test-32bit: collector/fixtures/sys/.unpacked + @echo ">> running tests in 32-bit mode" + @env GOARCH=$(GOARCH_CROSS) $(GO) test $(pkgs) + +.PHONY: skip-test-32bit +skip-test-32bit: + @echo ">> SKIP running tests in 32-bit mode: not supported on $(GOHOSTOS)/$(GOHOSTARCH)" + +%/.unpacked: %.ttar + @echo ">> extracting fixtures" + if [ -d $(dir $@) ] ; then rm -rf $(dir $@) ; fi + ./ttar -C $(dir $*) -x -f $*.ttar + touch $@ + +update_fixtures: + rm -vf collector/fixtures/sys/.unpacked + ./ttar -C collector/fixtures -c -f collector/fixtures/sys.ttar sys + + +.PHONY: test-e2e +test-e2e: build collector/fixtures/sys/.unpacked + @echo ">> running end-to-end tests" + ./end-to-end-test.sh + +.PHONY: skip-test-e2e +skip-test-e2e: + @echo ">> SKIP running end-to-end tests on $(GOHOSTOS)" + +.PHONY: checkmetrics +checkmetrics: $(PROMTOOL) + @echo ">> checking metrics for correctness" + ./checkmetrics.sh $(PROMTOOL) $(e2e-out) + ./checkmetrics.sh $(PROMTOOL) $(e2e-out-64k-page) + +.PHONY: checkrules +checkrules: $(PROMTOOL) + @echo ">> checking rules for correctness" + find . -name "*rules*.yml" | xargs -I {} $(PROMTOOL) check rules {} + +.PHONY: test-docker +test-docker: + @echo ">> testing docker image" + ./test_image.sh "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-amd64:$(DOCKER_IMAGE_TAG)" 9100 + +.PHONY: promtool +promtool: $(PROMTOOL) + +$(PROMTOOL): + mkdir -p $(FIRST_GOPATH)/bin + curl -fsS -L $(PROMTOOL_URL) | tar -xvzf - -C $(FIRST_GOPATH)/bin --strip 1 "prometheus-$(PROMTOOL_VERSION).$(GO_BUILD_PLATFORM)/promtool" diff --git a/Makefile.common b/Makefile.common new file mode 100644 index 00000000..3761597b --- /dev/null +++ b/Makefile.common @@ -0,0 +1,255 @@ +# A common Makefile that includes rules to be reused in different prometheus projects. +# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! + +# Example usage : +# Create the main Makefile in the root project directory. +# include Makefile.common +# customTarget: +# @echo ">> Running customTarget" +# + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) + +GO_VERSION ?= $(shell $(GO) version) +GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) +PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') + +PROMU := $(FIRST_GOPATH)/bin/promu +pkgs = ./... 
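+# Fold the GOARM variant into GO_BUILD_PLATFORM on ARM hosts so that release
+# asset names such as promu-$(PROMU_VERSION).linux-armv7.tar.gz resolve.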
+ +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif + +GOTEST := $(GO) test +GOTEST_DIR := +ifneq ($(CIRCLE_JOB),) +ifneq ($(shell command -v gotestsum > /dev/null),) + GOTEST_DIR := test-results + GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- +endif +endif + +PROMU_VERSION ?= 0.15.0 +PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz + +SKIP_GOLANGCI_LINT := +GOLANGCI_LINT := +GOLANGCI_LINT_OPTS ?= +GOLANGCI_LINT_VERSION ?= v1.54.2 +# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. +# windows isn't included here because of the path separator being different. +ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) + ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) + # If we're in CI and there is an Actions file, that means the linter + # is being run in Actions, so we don't need to run it here. + ifneq (,$(SKIP_GOLANGCI_LINT)) + GOLANGCI_LINT := + else ifeq (,$(CIRCLE_JOB)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) + GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint + endif + endif +endif + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) +DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) +DOCKERFILE_PATH ?= ./Dockerfile +DOCKERBUILD_CONTEXT ?= ./ +DOCKER_REPO ?= prom + +DOCKER_ARCHS ?= amd64 + +BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) +PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) +TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) + +SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG)) + +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif + +# This rule is used to forward a target like "build" to "common-build". This +# allows a new "build" target to be defined in a Makefile which includes this +# one and override "common-build" without override warnings. +%: common-% ; + +.PHONY: common-all +common-all: precheck style check_license lint yamllint unused build test + +.PHONY: common-style +common-style: + @echo ">> checking code style" + @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ + if [ -n "$${fmtRes}" ]; then \ + echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ + echo "Please ensure you are using $$($(GO) version) for formatting code."; \ + exit 1; \ + fi + +.PHONY: common-check_license +common-check_license: + @echo ">> checking license header" + @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ + awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +.PHONY: common-deps +common-deps: + @echo ">> getting dependencies" + $(GO) mod download + +.PHONY: update-go-deps +update-go-deps: + @echo ">> updating Go dependencies" + @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ + $(GO) get -d $$m; \ + done + $(GO) mod tidy + +.PHONY: common-test-short +common-test-short: $(GOTEST_DIR) + @echo ">> running short tests" + $(GOTEST) -short $(GOOPTS) $(pkgs) + +.PHONY: common-test +common-test: $(GOTEST_DIR) + @echo ">> running all tests" + $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) + +$(GOTEST_DIR): + @mkdir -p $@ + +.PHONY: common-format +common-format: + @echo ">> formatting code" + $(GO) fmt $(pkgs) + +.PHONY: common-vet +common-vet: + @echo ">> vetting code" + $(GO) vet $(GOOPTS) $(pkgs) + +.PHONY: common-lint +common-lint: $(GOLANGCI_LINT) +ifdef GOLANGCI_LINT + @echo ">> running golangci-lint" +# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. +# Otherwise staticcheck might fail randomly for some reason not yet explained. + $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null + $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) +endif + +.PHONY: common-yamllint +common-yamllint: + @echo ">> running yamllint on all YAML files in the repository" +ifeq (, $(shell command -v yamllint > /dev/null)) + @echo "yamllint not installed so skipping" +else + yamllint . +endif + +# For backward-compatibility. +.PHONY: common-staticcheck +common-staticcheck: lint + +.PHONY: common-unused +common-unused: + @echo ">> running check for unused/missing packages in go.mod" + $(GO) mod tidy + @git diff --exit-code -- go.sum go.mod + +.PHONY: common-build +common-build: promu + @echo ">> building binaries" + $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) + +.PHONY: common-tarball +common-tarball: promu + @echo ">> building release tarball" + $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) + +.PHONY: common-docker $(BUILD_DOCKER_ARCHS) +common-docker: $(BUILD_DOCKER_ARCHS) +$(BUILD_DOCKER_ARCHS): common-docker-%: + docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \ + -f $(DOCKERFILE_PATH) \ + --build-arg ARCH="$*" \ + --build-arg OS="linux" \ + $(DOCKERBUILD_CONTEXT) + +.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) +common-docker-publish: $(PUBLISH_DOCKER_ARCHS) +$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: + docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" + +DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) +.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) +common-docker-tag-latest: $(TAG_DOCKER_ARCHS) +$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" + docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" + +.PHONY: common-docker-manifest +common-docker-manifest: + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach 
ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG)) + DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" + +.PHONY: promu +promu: $(PROMU) + +$(PROMU): + $(eval PROMU_TMP := $(shell mktemp -d)) + curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) + mkdir -p $(FIRST_GOPATH)/bin + cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu + rm -r $(PROMU_TMP) + +.PHONY: proto +proto: + @echo ">> generating code from proto files" + @./scripts/genproto.sh + +ifdef GOLANGCI_LINT +$(GOLANGCI_LINT): + mkdir -p $(FIRST_GOPATH)/bin + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ + | sed -e '/install -d/d' \ + | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) +endif + +.PHONY: precheck +precheck:: + +define PRECHECK_COMMAND_template = +precheck:: $(1)_precheck + +PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) +.PHONY: $(1)_precheck +$(1)_precheck: + @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ + echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ + exit 1; \ + fi +endef diff --git a/batchjob_exporter.go b/batchjob_exporter.go new file mode 100644 index 00000000..bffa7934 --- /dev/null +++ b/batchjob_exporter.go @@ -0,0 +1,215 @@ +// Main entrypoint for batchjob_exporter + +package main + +import ( + "fmt" + stdlog "log" + "net/http" + _ "net/http/pprof" + "os" + "os/user" + "runtime" + "sort" + + "github.com/prometheus/common/promlog" + "github.com/prometheus/common/promlog/flag" + + "github.com/alecthomas/kingpin/v2" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/mahendrapaipuri/batchjob_exporter/collector" + "github.com/prometheus/client_golang/prometheus" + promcollectors "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/prometheus/common/version" + "github.com/prometheus/exporter-toolkit/web" + "github.com/prometheus/exporter-toolkit/web/kingpinflag" +) + +// handler wraps an unfiltered http.Handler but uses a filtered handler, +// created on the fly, if filtering is requested. Create instances with +// newHandler. +type handler struct { + unfilteredHandler http.Handler + // exporterMetricsRegistry is a separate registry for the metrics about + // the exporter itself. + exporterMetricsRegistry *prometheus.Registry + includeExporterMetrics bool + maxRequests int + logger log.Logger +} + +func newHandler(includeExporterMetrics bool, maxRequests int, logger log.Logger) *handler { + h := &handler{ + exporterMetricsRegistry: prometheus.NewRegistry(), + includeExporterMetrics: includeExporterMetrics, + maxRequests: maxRequests, + logger: logger, + } + if h.includeExporterMetrics { + h.exporterMetricsRegistry.MustRegister( + promcollectors.NewProcessCollector(promcollectors.ProcessCollectorOpts{}), + promcollectors.NewGoCollector(), + ) + } + if innerHandler, err := h.innerHandler(); err != nil { + panic(fmt.Sprintf("Couldn't create metrics handler: %s", err)) + } else { + h.unfilteredHandler = innerHandler + } + return h +} + +// ServeHTTP implements http.Handler. 
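+// A scrape can restrict collection with repeated collect query parameters,
+// e.g. /metrics?collect=rapl&collect=ipmi_dcmi; the names are those of the
+// --collector.<name> flags, and an unknown or disabled name yields HTTP 400.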
+func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + filters := r.URL.Query()["collect"] + level.Debug(h.logger).Log("msg", "collect query:", "filters", fmt.Sprintf("%v", filters)) + + if len(filters) == 0 { + // No filters, use the prepared unfiltered handler. + h.unfilteredHandler.ServeHTTP(w, r) + return + } + // To serve filtered metrics, we create a filtering handler on the fly. + filteredHandler, err := h.innerHandler(filters...) + if err != nil { + level.Warn(h.logger).Log("msg", "Couldn't create filtered metrics handler:", "err", err) + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(fmt.Sprintf("Couldn't create filtered metrics handler: %s", err))) + return + } + filteredHandler.ServeHTTP(w, r) +} + +// innerHandler is used to create both the one unfiltered http.Handler to be +// wrapped by the outer handler and also the filtered handlers created on the +// fly. The former is accomplished by calling innerHandler without any arguments +// (in which case it will log all the collectors enabled via command-line +// flags). +func (h *handler) innerHandler(filters ...string) (http.Handler, error) { + nc, err := collector.NewJobCollector(h.logger, filters...) + if err != nil { + return nil, fmt.Errorf("couldn't create collector: %s", err) + } + + // Only log the creation of an unfiltered handler, which should happen + // only once upon startup. + if len(filters) == 0 { + level.Info(h.logger).Log("msg", "Enabled collectors") + collectors := []string{} + for n := range nc.Collectors { + collectors = append(collectors, n) + } + sort.Strings(collectors) + for _, c := range collectors { + level.Info(h.logger).Log("collector", c) + } + } + + r := prometheus.NewRegistry() + r.MustRegister(version.NewCollector("batchjob_exporter")) + if err := r.Register(nc); err != nil { + return nil, fmt.Errorf("couldn't register batch job collector: %s", err) + } + + var handler http.Handler + if h.includeExporterMetrics { + handler = promhttp.HandlerFor( + prometheus.Gatherers{h.exporterMetricsRegistry, r}, + promhttp.HandlerOpts{ + ErrorLog: stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0), + ErrorHandling: promhttp.ContinueOnError, + MaxRequestsInFlight: h.maxRequests, + Registry: h.exporterMetricsRegistry, + }, + ) + // Note that we have to use h.exporterMetricsRegistry here to + // use the same promhttp metrics for all expositions. + handler = promhttp.InstrumentMetricHandler( + h.exporterMetricsRegistry, handler, + ) + } else { + handler = promhttp.HandlerFor( + r, + promhttp.HandlerOpts{ + ErrorLog: stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0), + ErrorHandling: promhttp.ContinueOnError, + MaxRequestsInFlight: h.maxRequests, + }, + ) + } + + return handler, nil +} + +func main() { + var ( + metricsPath = kingpin.Flag( + "web.telemetry-path", + "Path under which to expose metrics.", + ).Default("/metrics").String() + disableExporterMetrics = kingpin.Flag( + "web.disable-exporter-metrics", + "Exclude metrics about the exporter itself (promhttp_*, process_*, go_*).", + ).Bool() + maxRequests = kingpin.Flag( + "web.max-requests", + "Maximum number of parallel scrape requests. 
Use 0 to disable.", + ).Default("40").Int() + disableDefaultCollectors = kingpin.Flag( + "collector.disable-defaults", + "Set all collectors to disabled by default.", + ).Default("false").Bool() + maxProcs = kingpin.Flag( + "runtime.gomaxprocs", "The target number of CPUs Go will run on (GOMAXPROCS)", + ).Envar("GOMAXPROCS").Default("1").Int() + toolkitFlags = kingpinflag.AddFlags(kingpin.CommandLine, ":9010") + ) + + promlogConfig := &promlog.Config{} + flag.AddFlags(kingpin.CommandLine, promlogConfig) + kingpin.Version(version.Print("batchjob_exporter")) + kingpin.CommandLine.UsageWriter(os.Stdout) + kingpin.HelpFlag.Short('h') + kingpin.Parse() + logger := promlog.New(promlogConfig) + + if *disableDefaultCollectors { + collector.DisableDefaultCollectors() + } + level.Info(logger).Log("msg", "Starting batchjob_exporter", "version", version.Info()) + level.Info(logger).Log("msg", "Build context", "build_context", version.BuildContext()) + if user, err := user.Current(); err == nil && user.Uid == "0" { + level.Warn(logger).Log("msg", "Batch Job Metrics Exporter is running as root user. This exporter is designed to run as unprivileged user, root is not required.") + } + runtime.GOMAXPROCS(*maxProcs) + level.Debug(logger).Log("msg", "Go MAXPROCS", "procs", runtime.GOMAXPROCS(0)) + + http.Handle(*metricsPath, newHandler(!*disableExporterMetrics, *maxRequests, logger)) + if *metricsPath != "/" { + landingConfig := web.LandingConfig{ + Name: "Batch Job Metrics", + Description: "Prometheus Batch Job Metrics", + Version: version.Info(), + Links: []web.LandingLinks{ + { + Address: *metricsPath, + Text: "Metrics", + }, + }, + } + landingPage, err := web.NewLandingPage(landingConfig) + if err != nil { + level.Error(logger).Log("err", err) + os.Exit(1) + } + http.Handle("/", landingPage) + } + + server := &http.Server{} + if err := web.ListenAndServe(server, toolkitFlags, logger); err != nil { + level.Error(logger).Log("err", err) + os.Exit(1) + } +} diff --git a/batchjob_exporter_test.go b/batchjob_exporter_test.go new file mode 100644 index 00000000..3457b20a --- /dev/null +++ b/batchjob_exporter_test.go @@ -0,0 +1,112 @@ +package main + +import ( + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/prometheus/procfs" +) + +var ( + binary, _ = filepath.Abs("batchjob_exporter") +) + +const ( + address = "localhost:19100" +) + +func TestFileDescriptorLeak(t *testing.T) { + if _, err := os.Stat(binary); err != nil { + t.Skipf("batchjob_exporter binary not available, try to run `make build` first: %s", err) + } + fs, err := procfs.NewDefaultFS() + if err != nil { + t.Skipf("proc filesystem is not available, but currently required to read number of open file descriptors: %s", err) + } + if _, err := fs.Stat(); err != nil { + t.Errorf("unable to read process stats: %s", err) + } + + exporter := exec.Command(binary, "--web.listen-address", address) + test := func(pid int) error { + if err := queryExporter(address); err != nil { + return err + } + proc, err := procfs.NewProc(pid) + if err != nil { + return err + } + fdsBefore, err := proc.FileDescriptors() + if err != nil { + return err + } + for i := 0; i < 5; i++ { + if err := queryExporter(address); err != nil { + return err + } + } + fdsAfter, err := proc.FileDescriptors() + if err != nil { + return err + } + if want, have := len(fdsBefore), len(fdsAfter); want != have { + return fmt.Errorf("want %d open file descriptors after metrics scrape, have %d", want, have) + } + return nil + } + + if err := 
runCommandAndTests(exporter, address, test); err != nil { + t.Error(err) + } +} + +func queryExporter(address string) error { + resp, err := http.Get(fmt.Sprintf("http://%s/metrics", address)) + if err != nil { + return err + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + if err := resp.Body.Close(); err != nil { + return err + } + if want, have := http.StatusOK, resp.StatusCode; want != have { + return fmt.Errorf("want /metrics status code %d, have %d. Body:\n%s", want, have, b) + } + return nil +} + +func runCommandAndTests(cmd *exec.Cmd, address string, fn func(pid int) error) error { + if err := cmd.Start(); err != nil { + return fmt.Errorf("failed to start command: %s", err) + } + time.Sleep(50 * time.Millisecond) + for i := 0; i < 10; i++ { + if err := queryExporter(address); err == nil { + break + } + time.Sleep(500 * time.Millisecond) + if cmd.Process == nil || i == 9 { + return fmt.Errorf("can't start command") + } + } + + errc := make(chan error) + go func(pid int) { + errc <- fn(pid) + }(cmd.Process.Pid) + + err := <-errc + if cmd.Process != nil { + cmd.Process.Kill() + } + return err +} diff --git a/collector/collector.go b/collector/collector.go new file mode 100644 index 00000000..f4ea9629 --- /dev/null +++ b/collector/collector.go @@ -0,0 +1,178 @@ +// Package collector includes all individual collectors to gather and export SLURM job metrics. +package collector + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/alecthomas/kingpin/v2" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" +) + +// Namespace defines the common namespace to be used by all metrics. +const namespace = "batchjob" + +var ( + scrapeDurationDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "scrape", "collector_duration_seconds"), + "batchjob_exporter: Duration of a collector scrape.", + []string{"collector"}, + nil, + ) + scrapeSuccessDesc = prometheus.NewDesc( + prometheus.BuildFQName(namespace, "scrape", "collector_success"), + "batchjob_exporter: Whether a collector succeeded.", + []string{"collector"}, + nil, + ) +) + +const ( + defaultEnabled = true + defaultDisabled = false +) + +var ( + factories = make(map[string]func(logger log.Logger) (Collector, error)) + initiatedCollectorsMtx = sync.Mutex{} + initiatedCollectors = make(map[string]Collector) + collectorState = make(map[string]*bool) + forcedCollectors = map[string]bool{} // collectors which have been explicitly enabled or disabled +) + +func registerCollector(collector string, isDefaultEnabled bool, factory func(logger log.Logger) (Collector, error)) { + var helpDefaultState string + if isDefaultEnabled { + helpDefaultState = "enabled" + } else { + helpDefaultState = "disabled" + } + + flagName := fmt.Sprintf("collector.%s", collector) + flagHelp := fmt.Sprintf("Enable the %s collector (default: %s).", collector, helpDefaultState) + defaultValue := fmt.Sprintf("%v", isDefaultEnabled) + + flag := kingpin.Flag(flagName, flagHelp).Default(defaultValue).Action(collectorFlagAction(collector)).Bool() + collectorState[collector] = flag + + factories[collector] = factory +} + +// JobCollector implements the prometheus.Collector interface. +type JobCollector struct { + Collectors map[string]Collector + logger log.Logger +} + +// DisableDefaultCollectors sets the collector state to false for all collectors which +// have not been explicitly enabled on the command line. 
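+// Combined with the per-collector flags this gives an allow-list style of
+// invocation, e.g. --collector.disable-defaults --collector.rapl exports only
+// the rapl collector's metrics.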
+func DisableDefaultCollectors() { + for c := range collectorState { + if _, ok := forcedCollectors[c]; !ok { + *collectorState[c] = false + } + } +} + +// collectorFlagAction generates a new action function for the given collector +// to track whether it has been explicitly enabled or disabled from the command line. +// A new action function is needed for each collector flag because the ParseContext +// does not contain information about which flag called the action. +// See: https://github.com/alecthomas/kingpin/issues/294 +func collectorFlagAction(collector string) func(ctx *kingpin.ParseContext) error { + return func(ctx *kingpin.ParseContext) error { + forcedCollectors[collector] = true + return nil + } +} + +// NewJobCollector creates a new JobCollector. +func NewJobCollector(logger log.Logger, filters ...string) (*JobCollector, error) { + f := make(map[string]bool) + for _, filter := range filters { + enabled, exist := collectorState[filter] + if !exist { + return nil, fmt.Errorf("missing collector: %s", filter) + } + if !*enabled { + return nil, fmt.Errorf("disabled collector: %s", filter) + } + f[filter] = true + } + collectors := make(map[string]Collector) + initiatedCollectorsMtx.Lock() + defer initiatedCollectorsMtx.Unlock() + for key, enabled := range collectorState { + if !*enabled || (len(f) > 0 && !f[key]) { + continue + } + if collector, ok := initiatedCollectors[key]; ok { + collectors[key] = collector + } else { + collector, err := factories[key](log.With(logger, "collector", key)) + if err != nil { + return nil, err + } + collectors[key] = collector + initiatedCollectors[key] = collector + } + } + return &JobCollector{Collectors: collectors, logger: logger}, nil +} + +// Describe implements the prometheus.Collector interface. +func (n JobCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- scrapeDurationDesc + ch <- scrapeSuccessDesc +} + +// Collect implements the prometheus.Collector interface. +func (n JobCollector) Collect(ch chan<- prometheus.Metric) { + wg := sync.WaitGroup{} + wg.Add(len(n.Collectors)) + for name, c := range n.Collectors { + go func(name string, c Collector) { + execute(name, c, ch, n.logger) + wg.Done() + }(name, c) + } + wg.Wait() +} + +func execute(name string, c Collector, ch chan<- prometheus.Metric, logger log.Logger) { + begin := time.Now() + err := c.Update(ch) + duration := time.Since(begin) + var success float64 + + if err != nil { + if IsNoDataError(err) { + level.Debug(logger).Log("msg", "collector returned no data", "name", name, "duration_seconds", duration.Seconds(), "err", err) + } else { + level.Error(logger).Log("msg", "collector failed", "name", name, "duration_seconds", duration.Seconds(), "err", err) + } + success = 0 + } else { + level.Debug(logger).Log("msg", "collector succeeded", "name", name, "duration_seconds", duration.Seconds()) + success = 1 + } + ch <- prometheus.MustNewConstMetric(scrapeDurationDesc, prometheus.GaugeValue, duration.Seconds(), name) + ch <- prometheus.MustNewConstMetric(scrapeSuccessDesc, prometheus.GaugeValue, success, name) +} + +// Collector is the interface a collector has to implement. +type Collector interface { + // Get new metrics and expose them via prometheus registry. + Update(ch chan<- prometheus.Metric) error +} + +// ErrNoData indicates the collector found no data to collect, but had no other error. 
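+// Collectors should return it when their underlying source is simply absent
+// (for instance, no batch jobs running on the node), so that execute() logs
+// the scrape at debug rather than error level.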
+var ErrNoData = errors.New("collector returned no data") + +func IsNoDataError(err error) bool { + return err == ErrNoData +} diff --git a/collector/fixtures/e2e-test-output.txt b/collector/fixtures/e2e-test-output.txt new file mode 100644 index 00000000..aa37d90b --- /dev/null +++ b/collector/fixtures/e2e-test-output.txt @@ -0,0 +1,137 @@ +# HELP batchjob_cpu_system_seconds Cumulative CPU system seconds for jobid +# TYPE batchjob_cpu_system_seconds gauge +batchjob_cpu_system_seconds{batch="slurm",jobid="1009248",step="",task=""} 115.777502 +# HELP batchjob_cpu_total_seconds Cumulative CPU total seconds for jobid +# TYPE batchjob_cpu_total_seconds gauge +batchjob_cpu_total_seconds{batch="slurm",jobid="1009248",step="",task=""} 60491.070351 +# HELP batchjob_cpu_user_seconds Cumulative CPU user seconds for jobid +# TYPE batchjob_cpu_user_seconds gauge +batchjob_cpu_user_seconds{batch="slurm",jobid="1009248",step="",task=""} 60375.292848 +# HELP batchjob_cpus Number of CPUs in the jobid +# TYPE batchjob_cpus gauge +batchjob_cpus{batch="slurm",jobid="1009248",step="",task=""} 2 +# HELP batchjob_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which batchjob_exporter was built, and the goos and goarch for the build. +# TYPE batchjob_exporter_build_info gauge +# HELP batchjob_ipmi_dcmi_watts_total Current Power consumption in watts +# TYPE batchjob_ipmi_dcmi_watts_total counter +batchjob_ipmi_dcmi_watts_total 332 +# HELP batchjob_memory_cache_bytes Memory cache used in bytes +# TYPE batchjob_memory_cache_bytes gauge +batchjob_memory_cache_bytes{batch="slurm",jobid="1009248",step="",task=""} 0 +# HELP batchjob_memory_fail_count Memory fail count +# TYPE batchjob_memory_fail_count gauge +batchjob_memory_fail_count{batch="slurm",jobid="1009248",step="",task=""} 0 +# HELP batchjob_memory_rss_bytes Memory RSS used in bytes +# TYPE batchjob_memory_rss_bytes gauge +batchjob_memory_rss_bytes{batch="slurm",jobid="1009248",step="",task=""} 4.098592768e+09 +# HELP batchjob_memory_total_bytes Memory total given to jobid in bytes +# TYPE batchjob_memory_total_bytes gauge +batchjob_memory_total_bytes{batch="slurm",jobid="1009248",step="",task=""} 4.294967296e+09 +# HELP batchjob_memory_used_bytes Memory used in bytes +# TYPE batchjob_memory_used_bytes gauge +batchjob_memory_used_bytes{batch="slurm",jobid="1009248",step="",task=""} 4.111491072e+09 +# HELP batchjob_memsw_fail_count Swap fail count +# TYPE batchjob_memsw_fail_count gauge +batchjob_memsw_fail_count{batch="slurm",jobid="1009248",step="",task=""} 0 +# HELP batchjob_memsw_total_bytes Swap total given to jobid in bytes +# TYPE batchjob_memsw_total_bytes gauge +batchjob_memsw_total_bytes{batch="slurm",jobid="1009248",step="",task=""} 0 +# HELP batchjob_memsw_used_bytes Swap used in bytes +# TYPE batchjob_memsw_used_bytes gauge +batchjob_memsw_used_bytes{batch="slurm",jobid="1009248",step="",task=""} 0 +# HELP batchjob_nvidia_gpu_jobid Batch Job ID of current nVIDIA GPU +# TYPE batchjob_nvidia_gpu_jobid gauge +batchjob_nvidia_gpu_jobid{uuid="GPU-61a65011-6571-a6d2-5ab8-66cbb6f7f9c3"} 11000 +batchjob_nvidia_gpu_jobid{uuid="GPU-f124aa59-d406-d45b-9481-8fcd694e6c9e"} 10000 +# HELP batchjob_rapl_package_joules_total Current RAPL package value in joules +# TYPE batchjob_rapl_package_joules_total counter +batchjob_rapl_package_joules_total{index="0",path="collector/fixtures/sys/class/powercap/intel-rapl:0"} 258218.293244 
+batchjob_rapl_package_joules_total{index="1",path="collector/fixtures/sys/class/powercap/intel-rapl:1"} 130570.505826 +# HELP batchjob_scrape_collector_duration_seconds batchjob_exporter: Duration of a collector scrape. +# TYPE batchjob_scrape_collector_duration_seconds gauge +# HELP batchjob_scrape_collector_success batchjob_exporter: Whether a collector succeeded. +# TYPE batchjob_scrape_collector_success gauge +batchjob_scrape_collector_success{collector="ipmi_dcmi"} 1 +batchjob_scrape_collector_success{collector="nvidia_gpu"} 1 +batchjob_scrape_collector_success{collector="rapl"} 1 +batchjob_scrape_collector_success{collector="slurm_job"} 1 +# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles. +# TYPE go_gc_duration_seconds summary +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. +# TYPE go_memstats_alloc_bytes gauge +# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. +# TYPE go_memstats_alloc_bytes_total counter +# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. +# TYPE go_memstats_buck_hash_sys_bytes gauge +# HELP go_memstats_frees_total Total number of frees. +# TYPE go_memstats_frees_total counter +# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. +# TYPE go_memstats_gc_sys_bytes gauge +# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. +# TYPE go_memstats_heap_alloc_bytes gauge +# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. +# TYPE go_memstats_heap_idle_bytes gauge +# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. +# TYPE go_memstats_heap_inuse_bytes gauge +# HELP go_memstats_heap_objects Number of allocated objects. +# TYPE go_memstats_heap_objects gauge +# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. +# TYPE go_memstats_heap_released_bytes gauge +# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. +# TYPE go_memstats_heap_sys_bytes gauge +# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. +# TYPE go_memstats_last_gc_time_seconds gauge +# HELP go_memstats_lookups_total Total number of pointer lookups. +# TYPE go_memstats_lookups_total counter +# HELP go_memstats_mallocs_total Total number of mallocs. +# TYPE go_memstats_mallocs_total counter +# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. +# TYPE go_memstats_mcache_inuse_bytes gauge +# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. +# TYPE go_memstats_mcache_sys_bytes gauge +# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. +# TYPE go_memstats_mspan_inuse_bytes gauge +# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. +# TYPE go_memstats_mspan_sys_bytes gauge +# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. +# TYPE go_memstats_next_gc_bytes gauge +# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. +# TYPE go_memstats_other_sys_bytes gauge +# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. 
+# TYPE go_memstats_stack_inuse_bytes gauge +# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. +# TYPE go_memstats_stack_sys_bytes gauge +# HELP go_memstats_sys_bytes Number of bytes obtained from system. +# TYPE go_memstats_sys_bytes gauge +# HELP go_threads Number of OS threads created. +# TYPE go_threads gauge +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes. +# TYPE process_virtual_memory_max_bytes gauge +# HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. +# TYPE promhttp_metric_handler_errors_total counter +promhttp_metric_handler_errors_total{cause="encoding"} 0 +promhttp_metric_handler_errors_total{cause="gathering"} 0 +# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served. +# TYPE promhttp_metric_handler_requests_in_flight gauge +promhttp_metric_handler_requests_in_flight 1 +# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code. +# TYPE promhttp_metric_handler_requests_total counter +promhttp_metric_handler_requests_total{code="200"} 0 +promhttp_metric_handler_requests_total{code="500"} 0 +promhttp_metric_handler_requests_total{code="503"} 0 diff --git a/collector/fixtures/gpustat/GPU-61a65011-6571-a6d2-5ab8-66cbb6f7f9c3 b/collector/fixtures/gpustat/GPU-61a65011-6571-a6d2-5ab8-66cbb6f7f9c3 new file mode 100644 index 00000000..a7ab2112 --- /dev/null +++ b/collector/fixtures/gpustat/GPU-61a65011-6571-a6d2-5ab8-66cbb6f7f9c3 @@ -0,0 +1 @@ +11000 diff --git a/collector/fixtures/gpustat/GPU-f124aa59-d406-d45b-9481-8fcd694e6c9e b/collector/fixtures/gpustat/GPU-f124aa59-d406-d45b-9481-8fcd694e6c9e new file mode 100644 index 00000000..e4089838 --- /dev/null +++ b/collector/fixtures/gpustat/GPU-f124aa59-d406-d45b-9481-8fcd694e6c9e @@ -0,0 +1 @@ +10000 diff --git a/collector/fixtures/ipmi-dcmi-wrapper.sh b/collector/fixtures/ipmi-dcmi-wrapper.sh new file mode 100755 index 00000000..0f440b64 --- /dev/null +++ b/collector/fixtures/ipmi-dcmi-wrapper.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +echo """Current Power : 332 Watts +Minimum Power over sampling duration : 68 watts +Maximum Power over sampling duration : 504 watts +Average Power over sampling duration : 348 watts +Time Stamp : 11/03/2023 - 08:36:29 +Statistics reporting time period : 2685198000 milliseconds +Power Measurement : Active""" diff --git a/collector/fixtures/nvidia-smi b/collector/fixtures/nvidia-smi new file mode 100755 index 00000000..59b383fb --- /dev/null +++ b/collector/fixtures/nvidia-smi @@ -0,0 +1,5 @@ +#!/bin/bash + +echo """name, uuid +Tesla V100-SXM2-32GB, GPU-f124aa59-d406-d45b-9481-8fcd694e6c9e +Tesla V100-SXM2-32GB, GPU-61a65011-6571-a6d2-5ab8-66cbb6f7f9c3""" diff --git a/collector/fixtures/sys.ttar b/collector/fixtures/sys.ttar new file mode 
100644 index 00000000..7f24bb27 --- /dev/null +++ b/collector/fixtures/sys.ttar @@ -0,0 +1,4367 @@ +# Archive created by ttar -C collector/fixtures -c -f collector/fixtures/sys.ttar sys +Directory: sys +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/enabled +Lines: 1 +1 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw +Lines: 1 +150000000 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_0_name +Lines: 1 +long_term +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw +Lines: 1 +150000000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_0_time_window_us +Lines: 1 +55967744 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw +Lines: 1 +376000000 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_1_name +Lines: 1 +short_term +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw +Lines: 1 +180000000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/constraint_1_time_window_us +Lines: 1 +20468203520 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/device +SymlinkTo: ../../intel-rapl +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/enabled +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/energy_uj +Lines: 1 +258218293244 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:0/intel-rapl:0:0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/constraint_0_max_power_uw +Lines: 1 +64500000 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/constraint_0_name +Lines: 1 +long_term +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/constraint_0_power_limit_uw +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/constraint_0_time_window_us +Lines: 1 +976 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/device +SymlinkTo: ../../intel-rapl:0 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/enabled +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/energy_uj +Lines: 1 +24468409791 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/max_energy_range_uj +Lines: 1 +65712999613 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/name +Lines: 1 +dram +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/power +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/power/autosuspend_delay_ms +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/power/control +Lines: 1 +auto +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/power/runtime_active_time +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/power/runtime_status +Lines: 1 +unsupported +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/subsystem +SymlinkTo: ../../../../../../class/powercap +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/intel-rapl:0:0/uevent +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/name +Lines: 1 +package-0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:0/power +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/power/autosuspend_delay_ms +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/power/control +Lines: 1 +auto +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/power/runtime_active_time +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/power/runtime_status +Lines: 1 +unsupported +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 440 
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/subsystem +SymlinkTo: ../../../../../class/powercap +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:0/uevent +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:1 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/constraint_0_max_power_uw +Lines: 1 +150000000 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/constraint_0_name +Lines: 1 +long_term +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/constraint_0_power_limit_uw +Lines: 1 +150000000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/constraint_0_time_window_us +Lines: 1 +55967744 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/constraint_1_max_power_uw +Lines: 1 +376000000 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/constraint_1_name +Lines: 1 +short_term +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/constraint_1_power_limit_uw +Lines: 1 +180000000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/constraint_1_time_window_us +Lines: 1 +20468203520 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/device +SymlinkTo: ../../intel-rapl +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/enabled +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/energy_uj +Lines: 1 +130570505826 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:1/intel-rapl:1:0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/constraint_0_max_power_uw +Lines: 1 +64500000 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/constraint_0_name +Lines: 1 +long_term +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/constraint_0_power_limit_uw +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/constraint_0_time_window_us +Lines: 1 +976 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/device +SymlinkTo: ../../intel-rapl:1 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/enabled +Lines: 
1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/energy_uj +Lines: 1 +64930394974 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/max_energy_range_uj +Lines: 1 +65712999613 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/name +Lines: 1 +dram +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/power +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/power/autosuspend_delay_ms +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/power/control +Lines: 1 +auto +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/power/runtime_active_time +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/power/runtime_status +Lines: 1 +unsupported +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/power/runtime_suspended_time +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/subsystem +SymlinkTo: ../../../../../../class/powercap +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/intel-rapl:1:0/uevent +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/max_energy_range_uj +Lines: 1 +262143328850 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/name +Lines: 1 +package-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/intel-rapl:1/power +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/power/autosuspend_delay_ms +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/power/control +Lines: 1 +auto +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/power/runtime_active_time +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/power/runtime_status +Lines: 1 +unsupported +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/power/runtime_suspended_time +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/intel-rapl:1/subsystem +SymlinkTo: ../../../../../class/powercap +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/class/powercap/intel-rapl:1/uevent +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/class/powercap/power +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/power/autosuspend_delay_ms +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/power/control +Lines: 1 +auto +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/power/runtime_active_time +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/power/runtime_status +Lines: 1 +unsupported +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/power/runtime_suspended_time +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/subsystem +SymlinkTo: ../../../../class/powercap +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/class/powercap/uevent +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.controllers +Lines: 1 +cpuset cpu io memory pids +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.procs +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.stat +Lines: 2 +nr_descendants 281 +nr_dying_descendants 6458 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.subtree_control +Lines: 1 +cpuset cpu memory +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.threads +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cgroup.type 
+Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpu.stat +Lines: 6 +usage_usec 63822839552729 +user_usec 63308516624494 +system_usec 514322928234 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpuset.cpus.effective +Lines: 1 +0-79 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/io.bfq.weight +Lines: 1 +default 100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/io.latency +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/io.max +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/io.stat +Lines: 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.procs +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.stat +Lines: 2 +nr_descendants 12 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.subtree_control +Lines: 1 +cpuset cpu memory +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.threads +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpu.stat +Lines: 6 +usage_usec 60491070351 +user_usec 60375292848 +system_usec 115777502 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpuset.cpus +Lines: 1 +1,41 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpuset.mems +Lines: 1 +0-1 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.current +Lines: 1 
+4111491072 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.high +Lines: 1 +4294967296 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.max +Lines: 1 +4294967296 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.numa_stat +Lines: 26 +anon N0=4098330624 N1=262144 +file N0=0 N1=0 +kernel_stack N0=180224 N1=0 +pagetables N0=8601600 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=4078960640 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=4098273280 N1=262144 +active_anon N0=57344 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=89456 N1=43552 +slab_unreclaimable N0=348552 N1=64104 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.stat +Lines: 40 +anon 4098592768 +file 0 +kernel_stack 180224 +pagetables 8601600 +percpu 3333120 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 4078960640 +file_thp 0 +shmem_thp 0 +inactive_anon 4098535424 +active_anon 57344 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 133008 +slab_unreclaimable 412656 +slab 545664 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 3087490 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 150 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 295220 +thp_collapse_alloc 8 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.swap.current 
+Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.procs +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.stat +Lines: 2 +nr_descendants 3 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.subtree_control +Lines: 1 +cpuset cpu memory +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.threads +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpu.stat +Lines: 6 +usage_usec 2023506360 +user_usec 2019162141 +system_usec 4344219 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.current +Lines: 1 +4101341184 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.numa_stat +Lines: 26 +anon N0=4091998208 N1=65536 +file N0=0 N1=0 +kernel_stack N0=49152 N1=0 +pagetables N0=8134656 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=4078960640 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=4091977728 N1=65536 +active_anon N0=20480 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=8456 N1=10784 +slab_unreclaimable N0=131184 N1=7920 +workingset_refault_anon N0=0 N1=0 
+workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.stat +Lines: 40 +anon 4092063744 +file 0 +kernel_stack 49152 +pagetables 8134656 +percpu 833280 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 4078960640 +file_thp 0 +shmem_thp 0 +inactive_anon 4092043264 +active_anon 20480 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 19240 +slab_unreclaimable 139104 +slab 158344 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 22109 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 1 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 11561 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.procs +Lines: 1 +3401149 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.stat +Lines: 2 +nr_descendants 0 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.subtree_control +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.threads +Lines: 5 +3401149 +3401150 +3401151 +3401152 +3401153 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpu.stat +Lines: 6 +usage_usec 36117 +user_usec 0 +system_usec 36117 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.current +Lines: 1 +331776 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.numa_stat +Lines: 26 +anon N0=217088 N1=4096 +file N0=0 N1=0 +kernel_stack N0=32768 N1=0 +pagetables N0=12288 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=212992 N1=4096 +active_anon N0=4096 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=4656 N1=10784 +slab_unreclaimable N0=34616 N1=5712 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.stat +Lines: 40 +anon 221184 +file 0 +kernel_stack 32768 +pagetables 12288 +percpu 0 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 217088 +active_anon 4096 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 15440 +slab_unreclaimable 40328 +slab 55768 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 178 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 1 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 0 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/slurm/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.procs +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.stat +Lines: 2 +nr_descendants 1 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.subtree_control +Lines: 1 +cpuset cpu memory +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.threads +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpu.stat +Lines: 6 +usage_usec 2023493168 +user_usec 2019156803 +system_usec 4336365 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpuset.cpus +Lines: 1 +1,41 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpuset.mems +Lines: 1 +0-1 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.current +Lines: 1 +4100452352 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.high +Lines: 1 +4294967296 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.max +Lines: 1 +4294967296 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.numa_stat +Lines: 26 +anon N0=4091781120 N1=61440 +file N0=0 N1=0 +kernel_stack N0=16384 N1=0 +pagetables N0=8122368 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=4078960640 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=4091764736 N1=61440 +active_anon N0=16384 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 
+slab_reclaimable N0=3800 N1=0 +slab_unreclaimable N0=96568 N1=2208 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.stat +Lines: 40 +anon 4091842560 +file 0 +kernel_stack 16384 +pagetables 8122368 +percpu 277760 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 4078960640 +file_thp 0 +shmem_thp 0 +inactive_anon 4091826176 +active_anon 16384 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 3800 +slab_unreclaimable 98776 +slab 102576 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 21931 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 0 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 11561 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.procs +Lines: 2 +3401154 +3401163 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.stat +Lines: 2 +nr_descendants 0 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.subtree_control +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.threads +Lines: 2 +3401154 +3401163 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpu.stat +Lines: 6 +usage_usec 2023523056 +user_usec 2019186691 +system_usec 4336365 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.current +Lines: 1 +4100173824 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.numa_stat +Lines: 26 +anon N0=4091781120 N1=61440 +file N0=0 N1=0 +kernel_stack N0=16384 N1=0 +pagetables N0=8122368 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=4078960640 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=4091764736 N1=61440 +active_anon N0=16384 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=3800 N1=0 +slab_unreclaimable N0=96568 N1=2208 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.stat +Lines: 40 +anon 4091842560 +file 0 +kernel_stack 16384 +pagetables 8122368 +percpu 0 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 4078960640 +file_thp 0 +shmem_thp 0 +inactive_anon 4091826176 +active_anon 16384 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 3800 +slab_unreclaimable 98776 +slab 102576 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 21931 +pgmajfault 0 
+pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 0 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 11561 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_3/user/task_0/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.procs +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.stat +Lines: 2 +nr_descendants 3 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.subtree_control +Lines: 1 +cpuset cpu memory +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.threads +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpu.stat +Lines: 6 +usage_usec 52535205 +user_usec 50469999 +system_usec 2065206 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.current +Lines: 1 +8110080 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.numa_stat 
+Lines: 26 +anon N0=6184960 N1=163840 +file N0=0 N1=0 +kernel_stack N0=114688 N1=0 +pagetables N0=413696 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=6152192 N1=163840 +active_anon N0=32768 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=38128 N1=19592 +slab_unreclaimable N0=197576 N1=48936 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.stat +Lines: 40 +anon 6348800 +file 0 +kernel_stack 114688 +pagetables 413696 +percpu 833280 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 6316032 +active_anon 32768 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 57720 +slab_unreclaimable 246512 +slab 304232 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 110726 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 145 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 182 +thp_collapse_alloc 1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.procs +Lines: 1 +3346567 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.stat +Lines: 2 +nr_descendants 0 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.subtree_control +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.threads +Lines: 4 +3346567 +3346573 +3346574 +3346577 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpu.stat +Lines: 6 +usage_usec 502878 +user_usec 0 +system_usec 502878 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.current +Lines: 1 +266240 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.numa_stat +Lines: 26 +anon N0=196608 N1=4096 +file N0=0 N1=0 +kernel_stack N0=16384 N1=0 +pagetables N0=8192 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=192512 N1=4096 +active_anon N0=4096 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=7080 N1=9000 +slab_unreclaimable N0=18840 N1=3752 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.stat +Lines: 40 +anon 200704 +file 0 +kernel_stack 16384 +pagetables 8192 +percpu 0 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 196608 +active_anon 4096 +inactive_file 0 +active_file 0 
+unevictable 0 +slab_reclaimable 16080 +slab_unreclaimable 22592 +slab 38672 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 150 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 1 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 0 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/slurm/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.procs +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.stat +Lines: 2 +nr_descendants 1 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.subtree_control +Lines: 1 +cpuset cpu memory +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.threads +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpu.stat +Lines: 6 +usage_usec 52032326 +user_usec 49992531 +system_usec 2039795 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpuset.cpus +Lines: 1 +1,41 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpuset.mems +Lines: 1 +0-1 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.current +Lines: 1 +7286784 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.high +Lines: 1 +4294967296 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.max +Lines: 1 +4294967296 +Mode: 
640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.numa_stat +Lines: 26 +anon N0=5988352 N1=159744 +file N0=0 N1=0 +kernel_stack N0=98304 N1=0 +pagetables N0=405504 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=5959680 N1=159744 +active_anon N0=28672 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=31048 N1=10592 +slab_unreclaimable N0=178736 N1=45184 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.stat +Lines: 40 +anon 6148096 +file 0 +kernel_stack 98304 +pagetables 405504 +percpu 277760 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 6119424 +active_anon 28672 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 41640 +slab_unreclaimable 223920 +slab 265560 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 110576 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 144 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 182 +thp_collapse_alloc 1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0 +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.procs +Lines: 4 +3346596 +3346674 +3401141 +3401142 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.stat +Lines: 2 +nr_descendants 0 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.subtree_control +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.threads +Lines: 7 +3346596 +3346674 +3401141 +3401142 +3401143 +3401144 +3401145 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpu.stat +Lines: 6 +usage_usec 52032326 +user_usec 49992531 +system_usec 2039795 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.current +Lines: 1 +7008256 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.numa_stat +Lines: 26 +anon N0=5988352 N1=159744 +file N0=0 N1=0 +kernel_stack N0=98304 N1=0 +pagetables N0=405504 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=5959680 N1=159744 +active_anon N0=28672 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=31048 N1=10592 +slab_unreclaimable N0=178736 N1=45184 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.stat +Lines: 40 +anon 6148096 +file 0 +kernel_stack 98304 +pagetables 405504 +percpu 0 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 6119424 +active_anon 28672 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 41640 +slab_unreclaimable 223920 +slab 265560 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 110576 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 144 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 182 +thp_collapse_alloc 1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_batch/user/task_0/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.procs +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.stat +Lines: 2 +nr_descendants 3 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.subtree_control +Lines: 1 +cpuset cpu memory +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.threads +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpu.stat +Lines: 6 +usage_usec 486698 +user_usec 121640 +system_usec 365057 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.current +Lines: 1 +1155072 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.numa_stat +Lines: 26 +anon N0=147456 N1=32768 +file N0=0 N1=0 +kernel_stack N0=16384 N1=0 +pagetables N0=53248 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=143360 N1=32768 +active_anon N0=4096 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=4272 N1=9048 +slab_unreclaimable N0=19792 N1=7248 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.stat +Lines: 40 +anon 180224 +file 0 +kernel_stack 16384 +pagetables 53248 +percpu 833280 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 176128 +active_anon 4096 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 13320 +slab_unreclaimable 27040 +slab 40360 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 174 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 0 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 0 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.procs +Lines: 1 +3346540 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.stat +Lines: 2 +nr_descendants 0 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.subtree_control +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.threads +Lines: 4 +3346540 +3346549 +3346551 +3346553 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpu.stat +Lines: 6 +usage_usec 486698 +user_usec 121640 +system_usec 365057 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.current +Lines: 1 +319488 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.numa_stat +Lines: 26 +anon N0=147456 N1=32768 +file N0=0 N1=0 +kernel_stack N0=16384 N1=0 +pagetables N0=53248 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=143360 N1=32768 +active_anon N0=4096 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=4272 N1=9048 +slab_unreclaimable N0=19792 N1=7248 
+workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.stat +Lines: 40 +anon 180224 +file 0 +kernel_stack 16384 +pagetables 53248 +percpu 0 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 176128 +active_anon 4096 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 13320 +slab_unreclaimable 27040 +slab 40360 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 174 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 0 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 0 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/slurm/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.procs +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.stat +Lines: 2 +nr_descendants 1 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.subtree_control +Lines: 1 +cpuset cpu memory +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.threads +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpu.stat +Lines: 6 +usage_usec 0 +user_usec 0 +system_usec 0 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpuset.cpus +Lines: 1 +1,41 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpuset.mems +Lines: 1 +0-1 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.current +Lines: 1 +278528 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.high +Lines: 1 +4294967296 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.max +Lines: 1 +4294967296 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.numa_stat +Lines: 26 +anon N0=0 N1=0 +file N0=0 N1=0 +kernel_stack N0=0 N1=0 +pagetables N0=0 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=0 N1=0 +active_anon N0=0 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=0 N1=0 +slab_unreclaimable N0=0 N1=0 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.stat +Lines: 40 +anon 0 +file 0 +kernel_stack 0 +pagetables 0 +percpu 277760 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 0 +active_anon 0 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 0 +slab_unreclaimable 0 +slab 0 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 0 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 0 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 0 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.procs +Lines: 1 +3346557 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.stat +Lines: 2 +nr_descendants 0 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.subtree_control +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.threads +Lines: 1 +3346557 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpu.stat +Lines: 6 +usage_usec 0 +user_usec 0 +system_usec 0 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpuset.cpus.effective +Lines: 1 +1,41 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.numa_stat +Lines: 26 +anon N0=0 N1=0 +file N0=0 N1=0 +kernel_stack N0=0 N1=0 +pagetables N0=0 N1=0 +shmem N0=0 N1=0 +file_mapped N0=0 N1=0 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=0 N1=0 +active_anon N0=0 N1=0 +inactive_file N0=0 N1=0 +active_file N0=0 N1=0 +unevictable N0=0 N1=0 +slab_reclaimable N0=0 N1=0 +slab_unreclaimable N0=0 N1=0 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=0 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=0 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=0 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.stat +Lines: 40 +anon 0 +file 0 +kernel_stack 0 +pagetables 0 +percpu 0 +sock 0 +shmem 0 +file_mapped 0 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 0 +active_anon 0 +inactive_file 0 +active_file 0 +unevictable 0 +slab_reclaimable 0 +slab_unreclaimable 0 +slab 0 +workingset_refault_anon 0 +workingset_refault_file 0 +workingset_activate_anon 0 +workingset_activate_file 0 +workingset_restore_anon 0 +workingset_restore_file 0 +workingset_nodereclaim 0 +pgfault 0 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 0 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 0 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248/step_extern/user/task_special/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.current +Lines: 1 +53705207808 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 
+Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.numa_stat +Lines: 26 +anon N0=41179824128 N1=11067662336 +file N0=545075200 N1=547745792 +kernel_stack N0=3309568 N1=3473408 +pagetables N0=91783168 N1=39247872 +shmem N0=24477696 N1=14815232 +file_mapped N0=41238528 N1=28626944 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=40263221248 N1=9638510592 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=41203634176 N1=11086266368 +active_anon N0=593920 N1=557056 +inactive_file N0=4145152 N1=876544 +active_file N0=516452352 N1=532054016 +unevictable N0=0 N1=0 +slab_reclaimable N0=14635792 N1=6414000 +slab_unreclaimable N0=44437304 N1=48181344 +workingset_refault_anon N0=0 N1=0 +workingset_refault_file N0=6809 N1=2507 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=4640 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=4640 N1=0 +workingset_nodereclaim N0=512 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.stat +Lines: 40 +anon 52247486464 +file 1092820992 +kernel_stack 6782976 +pagetables 131031040 +percpu 78050560 +sock 40960 +shmem 39292928 +file_mapped 69865472 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 49901731840 +file_thp 0 +shmem_thp 0 +inactive_anon 52289900544 +active_anon 1150976 +inactive_file 5021696 +active_file 1048506368 +unevictable 0 +slab_reclaimable 21049792 +slab_unreclaimable 92618648 +slab 113668440 +workingset_refault_anon 0 +workingset_refault_file 9316 +workingset_activate_anon 0 +workingset_activate_file 4640 +workingset_restore_anon 0 +workingset_restore_file 4640 +workingset_nodereclaim 512 +pgfault 3901807818 +pgmajfault 0 +pgrefill 67497 +pgscan 66671 +pgsteal 66670 +pgactivate 24671146 +pgdeactivate 67364 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 47230020 +thp_collapse_alloc 60281 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
sys/fs/cgroup/system.slice/slurmstepd.scope/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/pids.current +Lines: 1 +476 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/pids.events +Lines: 1 +max 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/pids.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: sys/fs/cgroup/system.slice/slurmstepd.scope/system +Mode: 775 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.controllers +Lines: 1 +cpuset cpu memory +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.events +Lines: 2 +populated 1 +frozen 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.freeze +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.max.depth +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.max.descendants +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.procs +Lines: 1 +1435038 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.stat +Lines: 2 +nr_descendants 0 +nr_dying_descendants 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.subtree_control +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.threads +Lines: 1 +1435038 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cgroup.type +Lines: 1 +domain +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpu.max +Lines: 1 +max 100000 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpu.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpu.stat +Lines: 6 +usage_usec 9945939 +user_usec 1937135 +system_usec 
8008804 +nr_periods 0 +nr_throttled 0 +throttled_usec 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpu.weight +Lines: 1 +100 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpu.weight.nice +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpuset.cpus +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpuset.cpus.effective +Lines: 1 +0-79 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpuset.cpus.partition +Lines: 1 +member +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpuset.mems +Lines: 1 + +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/cpuset.mems.effective +Lines: 1 +0-1 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/io.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.current +Lines: 1 +38465536 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.events +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.events.local +Lines: 5 +low 0 +high 0 +max 0 +oom 0 +oom_kill 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.low +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.min +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.numa_stat +Lines: 26 +anon N0=10612736 N1=14233600 +file N0=294912 N1=110592 +kernel_stack N0=1097728 N1=1949696 +pagetables N0=1667072 N1=3182592 +shmem N0=57344 N1=53248 +file_mapped N0=151552 N1=49152 +file_dirty N0=0 N1=0 +file_writeback N0=0 N1=0 +swapcached N0=0 N1=0 +anon_thp N0=0 N1=0 +file_thp N0=0 N1=0 +shmem_thp N0=0 N1=0 +inactive_anon N0=10612736 N1=14233600 +active_anon N0=57344 N1=53248 +inactive_file N0=0 N1=0 +active_file N0=237568 N1=57344 +unevictable N0=0 N1=0 +slab_reclaimable N0=367424 N1=425784 +slab_unreclaimable N0=1607840 N1=2824776 +workingset_refault_anon N0=0 
N1=0 +workingset_refault_file N0=4 N1=0 +workingset_activate_anon N0=0 N1=0 +workingset_activate_file N0=4 N1=0 +workingset_restore_anon N0=0 N1=0 +workingset_restore_file N0=4 N1=0 +workingset_nodereclaim N0=0 N1=0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.oom.group +Lines: 1 +0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.pressure +Lines: 0 +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.stat +Lines: 40 +anon 24846336 +file 405504 +kernel_stack 3047424 +pagetables 4849664 +percpu 0 +sock 0 +shmem 110592 +file_mapped 200704 +file_dirty 0 +file_writeback 0 +swapcached 0 +anon_thp 0 +file_thp 0 +shmem_thp 0 +inactive_anon 24846336 +active_anon 110592 +inactive_file 0 +active_file 294912 +unevictable 0 +slab_reclaimable 793208 +slab_unreclaimable 4432616 +slab 5225824 +workingset_refault_anon 0 +workingset_refault_file 4 +workingset_activate_anon 0 +workingset_activate_file 4 +workingset_restore_anon 0 +workingset_restore_file 4 +workingset_nodereclaim 0 +pgfault 188273 +pgmajfault 0 +pgrefill 0 +pgscan 0 +pgsteal 0 +pgactivate 310 +pgdeactivate 0 +pglazyfree 0 +pglazyfreed 0 +thp_fault_alloc 0 +thp_collapse_alloc 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.swap.current +Lines: 1 +0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.swap.events +Lines: 3 +high 0 +max 0 +fail 0 +Mode: 440 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.swap.high +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/fs/cgroup/system.slice/slurmstepd.scope/system/memory.swap.max +Lines: 1 +max +Mode: 640 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: sys/.unpacked +Lines: 0 +Mode: 664 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/collector/helper.go b/collector/helper.go new file mode 100644 index 00000000..2c1cf9e8 --- /dev/null +++ b/collector/helper.go @@ -0,0 +1,49 @@ +package collector + +import ( + "fmt" + "os" + "os/exec" + "regexp" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" +) + +var ( + metricNameRegex = regexp.MustCompile(`_*[^0-9A-Za-z_]+_*`) +) + +// Check if file exists +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} + +// SanitizeMetricName sanitize the given metric name by replacing invalid characters by underscores. +// +// OpenMetrics and the Prometheus exposition format require the metric name +// to consist only of alphanumericals and "_", ":" and they must not start +// with digits. Since colons in MetricFamily are reserved to signal that the +// MetricFamily is the result of a calculation or aggregation of a general +// purpose monitoring system, colons will be replaced as well. 
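+// +// For example (editor's illustration, not part of the upstream comment): +// SanitizeMetricName("package-0/cpu:total") // -> "package_0_cpu_total"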
+// +// Note: If not subsequently prepending a namespace and/or subsystem (e.g., +// with prometheus.BuildFQName), the caller must ensure that the supplied +// metricName does not begin with a digit. +func SanitizeMetricName(metricName string) string { + return metricNameRegex.ReplaceAllString(metricName, "_") +} + +// Execute command and return stdout/stderr +func Execute(cmd string, args []string, logger log.Logger) ([]byte, error) { + level.Debug(logger).Log("msg", "Executing", "command", cmd, "args", fmt.Sprintf("%+v", args)) + out, err := exec.Command(cmd, args...).CombinedOutput() + if err != nil { + err = fmt.Errorf("error running %s: %s", cmd, err) + } + return out, err +} \ No newline at end of file diff --git a/collector/ipmi.go b/collector/ipmi.go new file mode 100644 index 00000000..0ec78231 --- /dev/null +++ b/collector/ipmi.go @@ -0,0 +1,106 @@ +// Taken from prometheus-community/ipmi_exporter/blob/master/collector_ipmi.go +// DCMI spec (old) https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/dcmi-v1-5-rev-spec.pdf + +//go:build !noipmi +// +build !noipmi + +package collector + +import ( + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/alecthomas/kingpin/v2" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" +) + +const ipmiCollectorSubsystem = "ipmi_dcmi" + +type impiCollector struct { + logger log.Logger + + wattsMetricDesc *prometheus.Desc +} + +var ( + ipmiDcmiWrapperExec = kingpin.Flag("collector.ipmi.dcmi.wrapper.path", "Path to IPMI DCMI executable wrapper.").Default("ipmi-dcmi-wrapper").String() + ipmiDCMIPowerMeasurementRegex = regexp.MustCompile(`^Power Measurement\s*:\s*(?P<value>Active|Not\sAvailable).*`) + ipmiDCMICurrentPowerRegex = regexp.MustCompile(`^Current Power\s*:\s*(?P<value>[0-9.]*)\s*Watts.*`) +) + +func init() { + registerCollector(ipmiCollectorSubsystem, defaultEnabled, NewIpmiCollector) +} + +// NewIpmiCollector returns a new Collector exposing IPMI DCMI power metrics. +func NewIpmiCollector(logger log.Logger) (Collector, error) { + + wattsMetricDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, ipmiCollectorSubsystem, "watts_total"), + "Current Power consumption in watts", []string{}, nil, + ) + + collector := impiCollector{ + logger: logger, + wattsMetricDesc: wattsMetricDesc, + } + return &collector, nil +} + +// Get value based on regex from IPMI output +func getValue(ipmiOutput []byte, regex *regexp.Regexp) (string, error) { + for _, line := range strings.Split(string(ipmiOutput), "\n") { + match := regex.FindStringSubmatch(line) + if match == nil { + continue + } + for i, name := range regex.SubexpNames() { + if name != "value" { + continue + } + return match[i], nil + } + } + return "", fmt.Errorf("could not find value in output: %s", string(ipmiOutput)) +}
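+ +// Illustrative example (editor's note, not from the original source): given +// the line "Current Power : 332 Watts", ipmiDCMICurrentPowerRegex captures +// value="332" and getValue returns "332".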
+ +// Update implements Collector and exposes IPMI DCMI power related metrics. +func (c *impiCollector) Update(ch chan<- prometheus.Metric) error { + args := []string{""} + stdOut, err := Execute(*ipmiDcmiWrapperExec, args, c.logger) + if err != nil { + return err + } + currentPowerConsumption, err := c.getCurrentPowerConsumption(stdOut) + if err != nil { + level.Error(c.logger).Log("msg", "Failed to collect IPMI DCMI data", "error", err) + return err + } + // A negative value means Power Measurement is not available + if currentPowerConsumption > -1 { + ch <- prometheus.MustNewConstMetric(c.wattsMetricDesc, prometheus.CounterValue, float64(currentPowerConsumption)) + } + return nil +} + +// Get current power consumption +func (c *impiCollector) getCurrentPowerConsumption(ipmiOutput []byte) (float64, error) { + // Check if Power Measurement is available + value, err := getValue(ipmiOutput, ipmiDCMIPowerMeasurementRegex) + if err != nil { + return -1, err + } + // When Power Measurement is in the 'Active' state, we can read watts + if value == "Active" { + value, err := getValue(ipmiOutput, ipmiDCMICurrentPowerRegex) + if err != nil { + return -1, err + } + return strconv.ParseFloat(value, 64) + } + return -1, nil +} diff --git a/collector/ipmi_test.go b/collector/ipmi_test.go new file mode 100644 index 00000000..c7996ebf --- /dev/null +++ b/collector/ipmi_test.go @@ -0,0 +1,35 @@ +//go:build !noipmi +// +build !noipmi + +package collector + +import ( + "testing" + + "github.com/go-kit/log" +) + +var ( + ipmidcmiStdout = ` +Current Power : 332 Watts +Minimum Power over sampling duration : 68 watts +Maximum Power over sampling duration : 504 watts +Average Power over sampling duration : 348 watts +Time Stamp : 11/03/2023 - 08:36:29 +Statistics reporting time period : 2685198000 milliseconds +Power Measurement : Active + +` + expectedPower = float64(332) +) + +func TestIpmiMetrics(t *testing.T) { + c := impiCollector{logger: log.NewNopLogger()} + value, err := c.getCurrentPowerConsumption([]byte(ipmidcmiStdout)) + if err != nil { + t.Errorf("failed to parse IPMI DCMI output: %v", err) + } + if value != expectedPower { + t.Fatalf("expected power %f. Got %f", expectedPower, value) + } +} diff --git a/collector/nvidia_gpus.go b/collector/nvidia_gpus.go new file mode 100644 index 00000000..ce90b26f --- /dev/null +++ b/collector/nvidia_gpus.go @@ -0,0 +1,124 @@ +//go:build !nonvidia +// +build !nonvidia + +package collector + +import ( + "fmt" + "os" + "strings" + + "github.com/alecthomas/kingpin/v2" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" +) + +const nvidiaGpuJobMapCollectorSubsystem = "nvidia_gpu" + +var ( + gpuStatPath = kingpin.Flag("collector.nvidia.gpu.stat.path", "Path to gpustat file that maps GPU ordinals to job IDs.").Default("/run/gpustat").String() +) + +type Device struct { + name string + uuid string + isMig bool +} + +type nvidiaGpuJobMapCollector struct { + devices []Device + logger log.Logger + gpuJobMapDesc *prometheus.Desc +} + +func init() { + registerCollector(nvidiaGpuJobMapCollectorSubsystem, defaultDisabled, NewNvidiaGpuJobMapCollector) +} + +// Get all physical or MIG devices using nvidia-smi command +// Example output: +// bash-4.4$ nvidia-smi --query-gpu=name,uuid --format=csv +// name, uuid +// Tesla V100-SXM2-32GB, GPU-f124aa59-d406-d45b-9481-8fcd694e6c9e +// Tesla V100-SXM2-32GB, GPU-61a65011-6571-a6d2-5ab8-66cbb6f7f9c3 +// +// Here we are using nvidia-smi to avoid having build issues if we use +// nvml go bindings.
This way we don't have dependencies on NVIDIA libraries and keep +// the exporter simple. +// +// NOTE: This command is expected to return MIG devices as well. +func getAllDevices(logger log.Logger) ([]Device, error) { + args := []string{"--query-gpu=name,uuid", "--format=csv"} + nvidiaSmiOutput, err := Execute("nvidia-smi", args, logger) + if err != nil { + level.Error(logger).Log("msg", "nvidia-smi command to get list of devices failed", "err", err) + return nil, err + } + allDevices := []Device{} + for _, line := range strings.Split(string(nvidiaSmiOutput), "\n") { + // Header line + if strings.HasPrefix(line, "name") { + continue + } + devDetails := strings.Split(line, ",") + if len(devDetails) < 2 { + level.Error(logger).Log("msg", "Cannot parse output from nvidia-smi command", "line", line) + continue + } + devName := strings.TrimSpace(devDetails[0]) + devUuid := strings.TrimSpace(devDetails[1]) + isMig := false + if strings.HasPrefix(devUuid, "MIG") { + isMig = true + } + allDevices = append(allDevices, Device{name: devName, uuid: devUuid, isMig: isMig}) + } + return allDevices, nil +} + +// NewNvidiaGpuJobMapCollector returns a new Collector exposing the mapping of batch jobs to NVIDIA GPU ordinals. +func NewNvidiaGpuJobMapCollector(logger log.Logger) (Collector, error) { + allDevices, _ := getAllDevices(logger) + gpuJobMapDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, nvidiaGpuJobMapCollectorSubsystem, "jobid"), + "Batch Job ID of current nVIDIA GPU", + []string{"uuid"}, nil, + ) + + collector := nvidiaGpuJobMapCollector{ + devices: allDevices, + logger: logger, + gpuJobMapDesc: gpuJobMapDesc, + } + return &collector, nil +} + +// Update implements Collector and exposes the batch job to GPU mapping. +func (c *nvidiaGpuJobMapCollector) Update(ch chan<- prometheus.Metric) error { + gpuJobMapper, _ := c.getJobId() + for _, dev := range c.devices { + ch <- prometheus.MustNewConstMetric(c.gpuJobMapDesc, prometheus.GaugeValue, gpuJobMapper[dev.uuid], dev.uuid) + } + return nil +} + +// Read gpustat file and get job ID of each GPU +func (c *nvidiaGpuJobMapCollector) getJobId() (map[string]float64, error) { + gpuJobMapper := make(map[string]float64) + for _, dev := range c.devices { + var jobId int64 = 0 + var slurmInfo string = fmt.Sprintf("%s/%s", *gpuStatPath, dev.uuid) + + if _, err := os.Stat(slurmInfo); err == nil { + content, err := os.ReadFile(slurmInfo) + if err != nil { + level.Error(c.logger).Log("msg", "Failed to get job ID for GPU", "uuid", dev.uuid, "err", err) + gpuJobMapper[dev.uuid] = float64(0) + continue + } + fmt.Sscanf(string(content), "%d", &jobId) + gpuJobMapper[dev.uuid] = float64(jobId) + } + } + return gpuJobMapper, nil +}
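+ +// Editor's sketch of the gpustat layout assumed by getJobId (the test fixtures +// in this PR follow it): one file per GPU UUID under the directory given by +// --collector.nvidia.gpu.stat.path, each containing a job ID, e.g. +// +// /run/gpustat/GPU-f124aa59-d406-d45b-9481-8fcd694e6c9e -> 10000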
gpuJobMapper["GPU-f124aa59-d406-d45b-9481-8fcd694e6c9e"] != 10000 { + t.Fatalf("Expected Job ID is %d: \nGot %f", 10000, gpuJobMapper["GPU-f124aa59-d406-d45b-9481-8fcd694e6c9e"]) + } + if gpuJobMapper["GPU-61a65011-6571-a6d2-5ab8-66cbb6f7f9c3"] != 11000 { + t.Fatalf("Expected Job ID is %d: \nGot %f", 11000, gpuJobMapper["GPU-61a65011-6571-a6d2-5ab8-66cbb6f7f9c3"]) + } +} diff --git a/collector/paths.go b/collector/paths.go new file mode 100644 index 00000000..2be0284d --- /dev/null +++ b/collector/paths.go @@ -0,0 +1,21 @@ +package collector + +import ( + "path/filepath" + + "github.com/alecthomas/kingpin/v2" +) + +var ( + // The path of the proc filesystem. + sysPath = kingpin.Flag("path.sysfs", "sysfs mountpoint.").Default("/sys").String() + cgroupfsPath = kingpin.Flag("path.cgroupfs", "cgroupfs mountpoint.").Default("/sys/fs/cgroup").String() +) + +func sysFilePath(name string) string { + return filepath.Join(*sysPath, name) +} + +func cgroupFilePath(name string) string { + return filepath.Join(*cgroupfsPath, name) +} diff --git a/collector/paths_test.go b/collector/paths_test.go new file mode 100644 index 00000000..955ac303 --- /dev/null +++ b/collector/paths_test.go @@ -0,0 +1,65 @@ +// Taken from node_exporter/collectors/paths_test.go and modified + +package collector + +import ( + "testing" + + "github.com/alecthomas/kingpin/v2" +) + +func TestDefaultSysPath(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.sysfs", "/sys"}); err != nil { + t.Fatal(err) + } + + if got, want := sysFilePath("somefile"), "/sys/somefile"; got != want { + t.Errorf("Expected: %s, Got: %s", want, got) + } + + if got, want := sysFilePath("some/file"), "/sys/some/file"; got != want { + t.Errorf("Expected: %s, Got: %s", want, got) + } +} + +func TestCustomSysPath(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.sysfs", "./../some/./place/"}); err != nil { + t.Fatal(err) + } + + if got, want := sysFilePath("somefile"), "../some/place/somefile"; got != want { + t.Errorf("Expected: %s, Got: %s", want, got) + } + + if got, want := sysFilePath("some/file"), "../some/place/some/file"; got != want { + t.Errorf("Expected: %s, Got: %s", want, got) + } +} + +func TestDefaultCgroupPath(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.cgroupfs", "/sys/fs/cgroup"}); err != nil { + t.Fatal(err) + } + + if got, want := cgroupFilePath("somefile"), "/sys/fs/cgroup/somefile"; got != want { + t.Errorf("Expected: %s, Got: %s", want, got) + } + + if got, want := cgroupFilePath("some/file"), "/sys/fs/cgroup/some/file"; got != want { + t.Errorf("Expected: %s, Got: %s", want, got) + } +} + +func TestCustomCgroupPath(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.cgroupfs", "./../some/./place/"}); err != nil { + t.Fatal(err) + } + + if got, want := cgroupFilePath("somefile"), "../some/place/somefile"; got != want { + t.Errorf("Expected: %s, Got: %s", want, got) + } + + if got, want := cgroupFilePath("some/file"), "../some/place/some/file"; got != want { + t.Errorf("Expected: %s, Got: %s", want, got) + } +} diff --git a/collector/rapl.go b/collector/rapl.go new file mode 100644 index 00000000..5f04cd20 --- /dev/null +++ b/collector/rapl.go @@ -0,0 +1,129 @@ +// Taken from node_exporter/collector/rapl_linux.go + +//go:build !norapl +// +build !norapl + +package collector + +import ( + "errors" + "fmt" + "os" + "strconv" + + "github.com/alecthomas/kingpin/v2" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + 
"github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/procfs/sysfs" +) + +const raplCollectorSubsystem = "rapl" + +type raplCollector struct { + fs sysfs.FS + logger log.Logger + + joulesMetricDesc *prometheus.Desc +} + +func init() { + registerCollector(raplCollectorSubsystem, defaultEnabled, NewRaplCollector) +} + +var ( + raplZoneLabel = kingpin.Flag("collector.rapl.enable-zone-label", "Enables RAPL zone labels").Default("false").Bool() +) + +// NewRaplCollector returns a new Collector exposing RAPL metrics. +func NewRaplCollector(logger log.Logger) (Collector, error) { + fs, err := sysfs.NewFS(*sysPath) + + if err != nil { + return nil, err + } + + joulesMetricDesc := prometheus.NewDesc( + prometheus.BuildFQName(namespace, raplCollectorSubsystem, "joules_total"), + "Current RAPL value in joules", + []string{"index", "path", "rapl_zone"}, nil, + ) + + collector := raplCollector{ + fs: fs, + logger: logger, + joulesMetricDesc: joulesMetricDesc, + } + return &collector, nil +} + +// Update implements Collector and exposes RAPL related metrics. +func (c *raplCollector) Update(ch chan<- prometheus.Metric) error { + // nil zones are fine when platform doesn't have powercap files present. + zones, err := sysfs.GetRaplZones(c.fs) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + level.Debug(c.logger).Log("msg", "Platform doesn't have powercap files present", "err", err) + return ErrNoData + } + if errors.Is(err, os.ErrPermission) { + level.Debug(c.logger).Log("msg", "Can't access powercap files", "err", err) + return ErrNoData + } + return fmt.Errorf("failed to retrieve rapl stats: %w", err) + } + + for _, rz := range zones { + microJoules, err := rz.GetEnergyMicrojoules() + if err != nil { + if errors.Is(err, os.ErrPermission) { + level.Debug(c.logger).Log("msg", "Can't access energy_uj file", "zone", rz, "err", err) + return ErrNoData + } + return err + } + + joules := float64(microJoules) / 1000000.0 + + if *raplZoneLabel { + ch <- c.joulesMetricWithZoneLabel(rz, joules) + } else { + ch <- c.joulesMetric(rz, joules) + } + } + return nil +} + +func (c *raplCollector) joulesMetric(z sysfs.RaplZone, v float64) prometheus.Metric { + index := strconv.Itoa(z.Index) + descriptor := prometheus.NewDesc( + prometheus.BuildFQName( + namespace, + raplCollectorSubsystem, + fmt.Sprintf("%s_joules_total", SanitizeMetricName(z.Name)), + ), + fmt.Sprintf("Current RAPL %s value in joules", z.Name), + []string{"index", "path"}, nil, + ) + + return prometheus.MustNewConstMetric( + descriptor, + prometheus.CounterValue, + v, + index, + z.Path, + ) +} + +func (c *raplCollector) joulesMetricWithZoneLabel(z sysfs.RaplZone, v float64) prometheus.Metric { + index := strconv.Itoa(z.Index) + + return prometheus.MustNewConstMetric( + c.joulesMetricDesc, + prometheus.CounterValue, + v, + index, + z.Path, + z.Name, + ) +} diff --git a/collector/rapl_test.go b/collector/rapl_test.go new file mode 100644 index 00000000..ffbe7b5c --- /dev/null +++ b/collector/rapl_test.go @@ -0,0 +1,38 @@ +//go:build !norapl +// +build !norapl + +package collector + +import ( + "testing" + + "github.com/alecthomas/kingpin/v2" + "github.com/go-kit/log" + "github.com/prometheus/procfs/sysfs" +) + +var expectedEnergyMetrics = []int{258218293244, 130570505826} + +func TestRaplMetrics(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.sysfs", "fixtures/sys"}); err != nil { + t.Fatal(err) + } + fs, err := sysfs.NewFS(*sysPath) + if err != nil { + t.Errorf("failed to open procfs: %v", err) + } + c 
:= raplCollector{fs: fs, logger: log.NewNopLogger()} + zones, err := sysfs.GetRaplZones(c.fs) + if err != nil { + t.Errorf("failed to get RAPL zones: %v", err) + } + for iz, rz := range zones { + microJoules, err := rz.GetEnergyMicrojoules() + if err != nil { + t.Fatalf("Cannot retrieve energy data from GetEnergyMicrojoules function: %v", err) + } + if expectedEnergyMetrics[iz] != int(microJoules) { + t.Fatalf("Expected energy value %d, got %d", expectedEnergyMetrics[iz], microJoules) + } + } +} diff --git a/collector/slurm.go b/collector/slurm.go new file mode 100644 index 00000000..3fff1d9b --- /dev/null +++ b/collector/slurm.go @@ -0,0 +1,435 @@ +//go:build !noslurm +// +build !noslurm + +package collector + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + + "github.com/alecthomas/kingpin/v2" + "github.com/containerd/cgroups/v3" + "github.com/containerd/cgroups/v3/cgroup1" + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/prometheus/client_golang/prometheus" +) + +const slurmCollectorSubsystem = "slurm_job" + +var ( + cgroupV2 = false + metricLock = sync.RWMutex{} + collectJobSteps = kingpin.Flag("collector.slurm.jobsteps.metrics", "Whether to collect metrics of all slurm job steps and tasks [WARNING: This option can result in very high cardinality of metrics].").Default("false").Bool() +) + +type CgroupMetric struct { + name string + cpuUser float64 + cpuSystem float64 + cpuTotal float64 + cpus int + memoryRSS float64 + memoryCache float64 + memoryUsed float64 + memoryTotal float64 + memoryFailCount float64 + memswUsed float64 + memswTotal float64 + memswFailCount float64 + userslice bool + uid int + jobid string + step string + task string + batch string + err bool +} + +type slurmCollector struct { + cgroupV2 bool + cpuUser *prometheus.Desc + cpuSystem *prometheus.Desc + cpuTotal *prometheus.Desc + cpus *prometheus.Desc + memoryRSS *prometheus.Desc + memoryCache *prometheus.Desc + memoryUsed *prometheus.Desc + memoryTotal *prometheus.Desc + memoryFailCount *prometheus.Desc + memswUsed *prometheus.Desc + memswTotal *prometheus.Desc + memswFailCount *prometheus.Desc + collectError *prometheus.Desc + logger log.Logger +} + +func init() { + registerCollector(slurmCollectorSubsystem, defaultEnabled, NewSlurmCollector) +}
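+ +// Cgroup layouts walked by this collector (editor's sketch; the v2 path matches +// the fixtures in this PR, the v1 ids are illustrative): +// +// v2: /sys/fs/cgroup/system.slice/slurmstepd.scope/job_1009248 +// v1: /sys/fs/cgroup/cpuacct/slurm/uid_1000/job_211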
+ +// NewSlurmCollector returns a new Collector exposing a summary of cgroups. +func NewSlurmCollector(logger log.Logger) (Collector, error) { + if cgroups.Mode() == cgroups.Unified { + cgroupV2 = true + level.Info(logger).Log("msg", "Cgroup version v2 detected", "mount", *cgroupfsPath) + } else { + level.Info(logger).Log("msg", "Cgroup version v2 not detected, will proceed with v1.") + } + return &slurmCollector{ + cgroupV2: cgroupV2, + cpuUser: prometheus.NewDesc(prometheus.BuildFQName(namespace, "cpu", "user_seconds"), + "Cumulative CPU user seconds for jobid", []string{"batch", "jobid", "step", "task"}, nil), + cpuSystem: prometheus.NewDesc(prometheus.BuildFQName(namespace, "cpu", "system_seconds"), + "Cumulative CPU system seconds for jobid", []string{"batch", "jobid", "step", "task"}, nil), + cpuTotal: prometheus.NewDesc(prometheus.BuildFQName(namespace, "cpu", "total_seconds"), + "Cumulative CPU total seconds for jobid", []string{"batch", "jobid", "step", "task"}, nil), + cpus: prometheus.NewDesc(prometheus.BuildFQName(namespace, "", "cpus"), + "Number of CPUs in the jobid", []string{"batch", "jobid", "step", "task"}, nil), + memoryRSS: prometheus.NewDesc(prometheus.BuildFQName(namespace, "memory", "rss_bytes"), + "Memory RSS used in bytes", []string{"batch", "jobid", "step", "task"}, nil), + memoryCache: prometheus.NewDesc(prometheus.BuildFQName(namespace, "memory", "cache_bytes"), + "Memory cache used in bytes", []string{"batch", "jobid", "step", "task"}, nil), + memoryUsed: prometheus.NewDesc(prometheus.BuildFQName(namespace, "memory", "used_bytes"), + "Memory used in bytes", []string{"batch", "jobid", "step", "task"}, nil), + memoryTotal: prometheus.NewDesc(prometheus.BuildFQName(namespace, "memory", "total_bytes"), + "Memory total given to jobid in bytes", []string{"batch", "jobid", "step", "task"}, nil), + memoryFailCount: prometheus.NewDesc(prometheus.BuildFQName(namespace, "memory", "fail_count"), + "Memory fail count", []string{"batch", "jobid", "step", "task"}, nil), + memswUsed: prometheus.NewDesc(prometheus.BuildFQName(namespace, "memsw", "used_bytes"), + "Swap used in bytes", []string{"batch", "jobid", "step", "task"}, nil), + memswTotal: prometheus.NewDesc(prometheus.BuildFQName(namespace, "memsw", "total_bytes"), + "Swap total given to jobid in bytes", []string{"batch", "jobid", "step", "task"}, nil), + memswFailCount: prometheus.NewDesc(prometheus.BuildFQName(namespace, "memsw", "fail_count"), + "Swap fail count", []string{"batch", "jobid", "step", "task"}, nil), + collectError: prometheus.NewDesc(prometheus.BuildFQName(namespace, "exporter", "collect_error"), + "Indicates collection error, 0=no error, 1=error", []string{"batch", "jobid", "step", "task"}, nil), + logger: logger, + }, nil +} + +// Return cgroups v1 subsystem +func subsystem() ([]cgroup1.Subsystem, error) { + s := []cgroup1.Subsystem{ + cgroup1.NewCpuacct(*cgroupfsPath), + cgroup1.NewMemory(*cgroupfsPath), + } + return s, nil +}
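+ +// Example of an exported series (editor's illustration; assumes the exporter +// namespace is "batchjob", as used elsewhere in this PR): +// +// batchjob_memory_used_bytes{batch="slurm",jobid="1009248",step="",task=""} 4.111491072e+09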
+ +// Update implements Collector and exposes cgroup statistics. +func (c *slurmCollector) Update(ch chan<- prometheus.Metric) error { + metrics, err := c.getJobsMetrics() + if err != nil { + return err + } + for n, m := range metrics { + if m.err { + ch <- prometheus.MustNewConstMetric(c.collectError, prometheus.GaugeValue, 1, m.batch, m.jobid, m.step, m.task) + } + ch <- prometheus.MustNewConstMetric(c.cpuUser, prometheus.GaugeValue, m.cpuUser, m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.cpuSystem, prometheus.GaugeValue, m.cpuSystem, m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.cpuTotal, prometheus.GaugeValue, m.cpuTotal, m.batch, m.jobid, m.step, m.task) + cpus := m.cpus + if cpus == 0 { + dir := filepath.Dir(n) + cpus = metrics[dir].cpus + if cpus == 0 { + cpus = metrics[filepath.Dir(dir)].cpus + } + } + ch <- prometheus.MustNewConstMetric(c.cpus, prometheus.GaugeValue, float64(cpus), m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.memoryRSS, prometheus.GaugeValue, m.memoryRSS, m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.memoryCache, prometheus.GaugeValue, m.memoryCache, m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.memoryUsed, prometheus.GaugeValue, m.memoryUsed, m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.memoryTotal, prometheus.GaugeValue, m.memoryTotal, m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.memoryFailCount, prometheus.GaugeValue, m.memoryFailCount, m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.memswUsed, prometheus.GaugeValue, m.memswUsed, m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.memswTotal, prometheus.GaugeValue, m.memswTotal, m.batch, m.jobid, m.step, m.task) + ch <- prometheus.MustNewConstMetric(c.memswFailCount, prometheus.GaugeValue, m.memswFailCount, m.batch, m.jobid, m.step, m.task) + } + return nil +} + +// Get current Jobs metrics from cgroups +func (c *slurmCollector) getJobsMetrics() (map[string]CgroupMetric, error) { + var names []string + var metrics = make(map[string]CgroupMetric) + var topPath string + var fullPath string + if c.cgroupV2 { + topPath = *cgroupfsPath + fullPath = topPath + "/system.slice/slurmstepd.scope" + } else { + topPath = *cgroupfsPath + "/cpuacct" + fullPath = topPath + "/slurm" + } + level.Debug(c.logger).Log("msg", "Loading cgroup", "path", fullPath) + err := filepath.Walk(fullPath, func(p string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() && strings.Contains(p, "/job_") && !strings.HasSuffix(p, "/slurm") && !strings.HasSuffix(p, "/user") { + if !*collectJobSteps && strings.Contains(p, "/step_") { + return nil + } + rel, _ := filepath.Rel(topPath, p) + level.Debug(c.logger).Log("msg", "Get Name", "name", p, "rel", rel) + names = append(names, "/"+rel) + } + return nil + }) + if err != nil { + level.Error(c.logger).Log("msg", "Error walking cgroup subsystem", "path", fullPath, "err", err) + return metrics, nil + } + wg := &sync.WaitGroup{} + wg.Add(len(names)) + for _, name := range names { + go func(n string) { + metric, _ := c.getMetrics(n) + if !metric.err { + metricLock.Lock() + metrics[n] = metric + metricLock.Unlock() + } + wg.Done() + }(name) + } + wg.Wait() + // When memory.max is "max", memoryTotal is set to -1; fix it by looking at + // the parent. We loop through names only once; as they come from Walk, top + // paths are seen first. Some cgroups are ignored, like
path=/system.slice/slurmstepd.scope/job_216/step_interactive/user, hence the need to loop through multiple parents. + if c.cgroupV2 { + for _, name := range names { + metric, ok := metrics[name] + if ok && metric.memoryTotal < 0 { + for upName := name; len(upName) > 1; { + upName = filepath.Dir(upName) + upMetric, ok := metrics[upName] + if ok { + metric.memoryTotal = upMetric.memoryTotal + metrics[name] = metric + } + } + } + } + } + return metrics, nil +} + +// Get metrics of a given SLURM cgroups path +func (c *slurmCollector) getMetrics(name string) (CgroupMetric, error) { + if c.cgroupV2 { + return c.getCgroupsV2Metrics(name) + } else { + return c.getCgroupsV1Metrics(name) + } +} + +// Parse cpuset.cpus file to return a list of CPUs in the cgroup +func (c *slurmCollector) parseCpuSet(cpuset string) ([]string, error) { + var cpus []string + var start, end int + var err error + if cpuset == "" { + return nil, nil + } + ranges := strings.Split(cpuset, ",") + for _, r := range ranges { + boundaries := strings.Split(r, "-") + if len(boundaries) == 1 { + start, err = strconv.Atoi(boundaries[0]) + if err != nil { + return nil, err + } + end = start + } else if len(boundaries) == 2 { + start, err = strconv.Atoi(boundaries[0]) + if err != nil { + return nil, err + } + end, err = strconv.Atoi(boundaries[1]) + if err != nil { + return nil, err + } + } + for e := start; e <= end; e++ { + cpu := strconv.Itoa(e) + cpus = append(cpus, cpu) + } + } + return cpus, nil +} + +// Get list of CPUs in the cgroup +func (c *slurmCollector) getCPUs(name string) ([]string, error) { + var cpusPath string + if c.cgroupV2 { + cpusPath = fmt.Sprintf("%s%s/cpuset.cpus.effective", *cgroupfsPath, name) + } else { + cpusPath = fmt.Sprintf("%s/cpuset%s/cpuset.cpus", *cgroupfsPath, name) + } + if !fileExists(cpusPath) { + return nil, nil + } + cpusData, err := os.ReadFile(cpusPath) + if err != nil { + level.Error(c.logger).Log("msg", "Error reading cpuset", "cpuset", cpusPath, "err", err) + return nil, err + } + cpus, err := c.parseCpuSet(strings.TrimSuffix(string(cpusData), "\n")) + if err != nil { + level.Error(c.logger).Log("msg", "Error parsing cpu set", "cpuset", cpusPath, "err", err) + return nil, err + } + return cpus, nil +} + +// Get job details from cgroups v1 +func (c *slurmCollector) getInfoV1(name string, metric *CgroupMetric) { + var err error + pathBase := filepath.Base(name) + userSlicePattern := regexp.MustCompile("^user-([0-9]+)\\.slice$") + userSliceMatch := userSlicePattern.FindStringSubmatch(pathBase) + if len(userSliceMatch) == 2 { + metric.userslice = true + metric.uid, err = strconv.Atoi(userSliceMatch[1]) + if err != nil { + level.Error(c.logger).Log("msg", "Error getting slurm uid number", "uid", pathBase, "err", err) + } + return + } + slurmPattern := regexp.MustCompile("^/slurm/uid_([0-9]+)/job_([0-9]+)(/step_([^/]+)(/task_([0-9]+))?)?$") + slurmMatch := slurmPattern.FindStringSubmatch(name) + level.Debug(c.logger).Log("msg", "Got for match", "name", name, "len(slurmMatch)", len(slurmMatch), "slurmMatch", fmt.Sprintf("%v", slurmMatch)) + if len(slurmMatch) >= 3 { + metric.uid, err = strconv.Atoi(slurmMatch[1]) + if err != nil { + level.Error(c.logger).Log("msg", "Error getting slurm uid number", "uid", name, "err", err) + } + metric.jobid = slurmMatch[2] + metric.step = slurmMatch[4] + metric.task = slurmMatch[6] + return + } +}
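+ +// For instance (editor's note, ids illustrative): a v1 path like +// "/slurm/uid_1000/job_211/step_batch/task_0" yields uid=1000, jobid="211", +// step="batch", task="0".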
"slurm"} + metric.err = false + level.Debug(c.logger).Log("msg", "Loading cgroup v1", "path", name) + ctrl, err := cgroup1.Load(cgroup1.StaticPath(name), cgroup1.WithHiearchy(subsystem)) + if err != nil { + level.Error(c.logger).Log("msg", "Failed to load cgroups", "path", name, "err", err) + metric.err = true + return metric, err + } + stats, err := ctrl.Stat(cgroup1.IgnoreNotExist) + if err != nil { + level.Error(c.logger).Log("msg", "Failed to stat cgroups", "path", name, "err", err) + return metric, err + } + if stats == nil { + level.Error(c.logger).Log("msg", "Cgroup stats are nil", "path", name) + return metric, err + } + if stats.CPU != nil { + if stats.CPU.Usage != nil { + metric.cpuUser = float64(stats.CPU.Usage.User) / 1000000000.0 + metric.cpuSystem = float64(stats.CPU.Usage.Kernel) / 1000000000.0 + metric.cpuTotal = float64(stats.CPU.Usage.Total) / 1000000000.0 + } + } + if stats.Memory != nil { + metric.memoryRSS = float64(stats.Memory.TotalRSS) + metric.memoryCache = float64(stats.Memory.TotalCache) + if stats.Memory.Usage != nil { + metric.memoryUsed = float64(stats.Memory.Usage.Usage) + metric.memoryTotal = float64(stats.Memory.Usage.Limit) + metric.memoryFailCount = float64(stats.Memory.Usage.Failcnt) + } + if stats.Memory.Swap != nil { + metric.memswUsed = float64(stats.Memory.Swap.Usage) + metric.memswTotal = float64(stats.Memory.Swap.Limit) + metric.memswFailCount = float64(stats.Memory.Swap.Failcnt) + } + } + if cpus, err := c.getCPUs(name); err == nil { + metric.cpus = len(cpus) + } + c.getInfoV1(name, &metric) + return metric, nil +} + +// Convenience function that will check if name+metric exists in the data +// and log an error if it does not. It returns 0 in such case but otherwise +// returns the value +func (c *slurmCollector) getOneMetric(name string, metric string, required bool, data map[string]float64) float64 { + val, ok := data[metric] + if !ok && required { + level.Error(c.logger).Log("msg", "Failed to load", "metric", metric, "cgroup", name) + } + return val +} + +// Get Job info for cgroups v2 +func (c *slurmCollector) getInfoV2(name string, metric *CgroupMetric) { + // possibilities are /system.slice/slurmstepd.scope/job_211 + // /system.slice/slurmstepd.scope/job_211/step_interactive + // /system.slice/slurmstepd.scope/job_211/step_extern/user/task_0 + // we never ever get the uid + metric.uid = -1 + // nor is there a userslice + metric.userslice = false + slurmPattern := regexp.MustCompile("^/system.slice/slurmstepd.scope/job_([0-9]+)(/step_([^/]+)(/user/task_([[0-9]+))?)?$") + slurmMatch := slurmPattern.FindStringSubmatch(name) + level.Debug(c.logger).Log("msg", "Got for match", "name", name, "len(slurmMatch)", len(slurmMatch), "slurmMatch", fmt.Sprintf("%v", slurmMatch)) + if len(slurmMatch) == 6 { + metric.jobid = slurmMatch[1] + metric.step = slurmMatch[3] + metric.task = slurmMatch[5] + } +} + +// Get Job metrics from cgroups v2 +func (c *slurmCollector) getCgroupsV2Metrics(name string) (CgroupMetric, error) { + metric := CgroupMetric{name: name, batch: "slurm"} + metric.err = false + level.Debug(c.logger).Log("msg", "Loading cgroup v2", "path", name) + // Files to parse out of the cgroup + controllers := []string{"cpu.stat", "memory.current", "memory.events", "memory.max", "memory.stat"} + data, err := LoadCgroupsV2Metrics(name, controllers) + if err != nil { + level.Error(c.logger).Log("msg", "Failed to load cgroups", "path", name, "err", err) + metric.err = true + return metric, err + } + metric.cpuUser = c.getOneMetric(name, 
"cpu.stat.user_usec", true, data) / 1000000.0 + metric.cpuSystem = c.getOneMetric(name, "cpu.stat.system_usec", true, data) / 1000000.0 + metric.cpuTotal = c.getOneMetric(name, "cpu.stat.usage_usec", true, data) / 1000000.0 + // we use Oom entry from memory.events - it maps most closely to FailCount + // TODO: add oom_kill as a separate value + metric.memoryFailCount = c.getOneMetric(name, "memory.events.oom", true, data) + // taking Slurm's cgroup v2 as inspiration, swapcached could be missing if swap is off so OK to ignore that case + metric.memoryRSS = c.getOneMetric(name, "memory.stat.anon", true, data) + c.getOneMetric(name, "memory.stat.swapcached", false, data) + // I guess? + metric.memoryCache = c.getOneMetric(name, "memory.stat.file", true, data) + metric.memoryUsed = c.getOneMetric(name, "memory.current", true, data) + metric.memoryTotal = c.getOneMetric(name, "memory.max", true, data) + metric.memswUsed = 0.0 + metric.memswTotal = 0.0 + metric.memswFailCount = 0.0 + if cpus, err := c.getCPUs(name); err == nil { + metric.cpus = len(cpus) + } + c.getInfoV2(name, &metric) + return metric, nil +} \ No newline at end of file diff --git a/collector/slurm_test.go b/collector/slurm_test.go new file mode 100644 index 00000000..5abb061d --- /dev/null +++ b/collector/slurm_test.go @@ -0,0 +1,47 @@ +//go:build !noslurm +// +build !noslurm + +package collector + +import ( + "reflect" + "testing" + + "github.com/alecthomas/kingpin/v2" + "github.com/go-kit/log" +) + +var expectedSlurmMetrics = make(map[string]CgroupMetric) + +func TestSlurmJobMetrics(t *testing.T) { + if _, err := kingpin.CommandLine.Parse([]string{"--path.cgroupfs", "fixtures/sys/fs/cgroup"}); err != nil { + t.Fatal(err) + } + c := slurmCollector{cgroupV2: true, logger: log.NewNopLogger()} + metrics, err := c.getJobsMetrics() + expectedSlurmMetrics["/system.slice/slurmstepd.scope/job_1009248"] = CgroupMetric{ + name: "/system.slice/slurmstepd.scope/job_1009248", + cpuUser: 60375.292848, + cpuSystem: 115.777502, + cpuTotal: 60491.070351, + cpus: 2, + memoryRSS: 4.098592768e+09, + memoryCache: 0, + memoryUsed: 4.111491072e+09, + memoryTotal: 4.294967296e+09, + memoryFailCount: 0, + memswUsed: 0, + memswTotal: 0, + memswFailCount: 0, + userslice: false, + uid: -1, + jobid: "1009248", + batch: "slurm", + err: false} + if err != nil { + t.Fatalf("Cannot retrieve data from getJobsMetrics function: %v ", err) + } + if !reflect.DeepEqual(metrics, expectedSlurmMetrics) { + t.Fatalf("Expected metrics data is %+v: \nGot %+v", expectedSlurmMetrics, metrics) + } +} diff --git a/collector/utils.go b/collector/utils.go new file mode 100644 index 00000000..6de38622 --- /dev/null +++ b/collector/utils.go @@ -0,0 +1,46 @@ +//go:build !nostat +// +build !nostat + +package collector + +import ( + "os" + "path/filepath" + "strconv" + "strings" +) + +// Load cgroups v2 metrics from a given path +func LoadCgroupsV2Metrics(name string, controllers []string) (map[string]float64, error) { + data := make(map[string]float64) + + for _, fName := range controllers { + contents, err := os.ReadFile(filepath.Join(*cgroupfsPath, name, fName)) + if err != nil { + return data, err + } + for _, line := range strings.Split(string(contents), "\n") { + // Some of the above have a single value and others have a "data_name 123" + parts := strings.Fields(line) + indName := fName + indData := 0 + if len(parts) == 1 || len(parts) == 2 { + if len(parts) == 2 { + indName += "." 
+ parts[0] + indData = 1 + } + if parts[indData] == "max" { + data[indName] = -1.0 + } else { + f, err := strconv.ParseFloat(parts[indData], 64) + if err == nil { + data[indName] = f + } else { + return data, err + } + } + } + } + } + return data, nil +} diff --git a/e2e-test.sh b/e2e-test.sh new file mode 100755 index 00000000..5b714e2a --- /dev/null +++ b/e2e-test.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash + +set -euf -o pipefail + +cd "$(dirname $0)" + +port="$((10000 + (RANDOM % 10000)))" +tmpdir=$(mktemp -d /tmp/batchjob_exporter_e2e_test.XXXXXX) + +skip_re="^(go_|batchjob_exporter_build_info|batchjob_scrape_collector_duration_seconds|process_|batchjob_textfile_mtime_seconds|batchjob_time_(zone|seconds)|batchjob_network_(receive|transmit)_(bytes|packets)_total)" + +arch="$(uname -m)" + +fixture='collector/fixtures/e2e-test-output.txt' + +keep=0; update=0; verbose=0 +while getopts 'hkuv' opt +do + case "$opt" in + k) + keep=1 + ;; + u) + update=1 + ;; + v) + verbose=1 + set -x + ;; + *) + echo "Usage: $0 [-k] [-u] [-v]" + echo " -k: keep temporary files and leave batchjob_exporter running" + echo " -u: update fixture" + echo " -v: verbose output" + exit 1 + ;; + esac +done + +if [ ! -x ./batchjob_exporter ] +then + echo './batchjob_exporter not found. Consider running `go build` first.' >&2 + exit 1 +fi + +PATH=$PWD/collector/fixtures:$PATH ./batchjob_exporter \ + --path.sysfs="collector/fixtures/sys" \ + --path.cgroupfs="collector/fixtures/sys/fs/cgroup" \ + --collector.ipmi.dcmi.wrapper.path="collector/fixtures/ipmi-dcmi-wrapper.sh" \ + --collector.nvidia_gpu \ + --collector.nvidia.gpu.stat.path="collector/fixtures/gpustat" \ + --web.listen-address "127.0.0.1:${port}" \ + --log.level="debug" > "${tmpdir}/batchjob_exporter.log" 2>&1 & + +echo $! > "${tmpdir}/batchjob_exporter.pid" + +finish() { + if [ $? 
-ne 0 -o ${verbose} -ne 0 ] + then + cat << EOF >&2 +LOG ===================== +$(cat "${tmpdir}/batchjob_exporter.log") +========================= +EOF + fi + + if [ ${update} -ne 0 ] + then + cp "${tmpdir}/e2e-test-output.txt" "${fixture}" + fi + + if [ ${keep} -eq 0 ] + then + kill -9 "$(cat ${tmpdir}/batchjob_exporter.pid)" + # This silences the "Killed" message + set +e + wait "$(cat ${tmpdir}/batchjob_exporter.pid)" > /dev/null 2>&1 + rm -rf "${tmpdir}" + fi +} + +trap finish EXIT + +get() { + if command -v curl > /dev/null 2>&1 + then + curl -s -f "$@" + elif command -v wget > /dev/null 2>&1 + then + wget -O - "$@" + else + echo "Neither curl nor wget found" + exit 1 + fi +} + +sleep 1 + +get "127.0.0.1:${port}/metrics" | grep -E -v "${skip_re}" > "${tmpdir}/e2e-test-output.txt" + +diff -u \ + "${fixture}" \ + "${tmpdir}/e2e-test-output.txt" diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..7276e492 --- /dev/null +++ b/go.mod @@ -0,0 +1,41 @@ +module github.com/mahendrapaipuri/batchjob_exporter + +go 1.21 + +require ( + github.com/alecthomas/kingpin/v2 v2.3.2 + github.com/containerd/cgroups/v3 v3.0.2 + github.com/go-kit/log v0.2.1 + github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/common v0.45.0 + github.com/prometheus/exporter-toolkit v0.10.0 + github.com/prometheus/procfs v0.12.0 +) + +require ( + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/docker/go-units v0.4.0 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/godbus/dbus/v5 v5.1.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/stretchr/testify v1.8.4 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.12.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..c01fd6ea --- /dev/null +++ b/go.sum @@ -0,0 +1,97 @@ +github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= +github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/containerd/cgroups/v3 v3.0.2 
h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0= +github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod 
h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/exporter-toolkit v0.10.0 h1:yOAzZTi4M22ZzVxD+fhy1URTuNRj/36uQJJ5S8IPza8= +github.com/prometheus/exporter-toolkit v0.10.0/go.mod h1:+sVFzuvV5JDyw+Ih6p3zFxZNVnKQa3x5qPmDSiPu4ZY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/staticcheck.conf b/staticcheck.conf new file mode 100644 index 00000000..82a0d9b0 --- /dev/null +++ b/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "ST1003"] diff --git a/ttar b/ttar new file mode 100755 index 00000000..b0171a12 --- /dev/null +++ b/ttar @@ -0,0 +1,389 @@ +#!/usr/bin/env bash + +# Purpose: plain text tar format +# Limitations: - only suitable for text files, directories, and symlinks +# - stores only filename, content, and mode +# - not designed for untrusted input +# +# Note: must work with bash version 3.2 (macOS) + +# Copyright 2017 Roger Luethi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit -o nounset + +# Sanitize environment (for instance, standard sorting of glob matches) +export LC_ALL=C + +path="" +CMD="" +ARG_STRING="$*" + +#------------------------------------------------------------------------------ + Not all sed implementations can work on null bytes. In order to make ttar +# work out of the box on macOS, use Python as a stream editor. + +USE_PYTHON=0 + +PYTHON_CREATE_FILTER=$(cat << 'PCF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'EOF', r'\EOF', line) + line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) + line = re.sub('\x00', r'NULLBYTE', line) + sys.stdout.write(line) +PCF +) + +PYTHON_EXTRACT_FILTER=$(cat << 'PEF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line) + line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line) + line = re.sub(r'(?<!\\)EOF', r'', line) + line = re.sub(r'\\EOF', 'EOF', line) + sys.stdout.write(line) +PEF +) + +function test_environment { + if [ "$(uname)" == "Darwin" ]; then + if ! which python >/dev/null; then + echo "ERROR Python not found. Aborting."
+ exit 2 + fi + USE_PYTHON=1 + fi +} + +#------------------------------------------------------------------------------ + +function usage { + bname=$(basename "$0") + cat << USAGE +Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive) + $bname -t -f <ARCHIVE> (list archive contents) + $bname [-C <DIR>] -x -f <ARCHIVE> (extract archive) + +Options: + -C <DIR> (change directory) + -v (verbose) + +Example: Change to sysfs directory, create ttar file from fixtures directory + $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ +USAGE +exit "$1" +} + +function vecho { + if [ "${VERBOSE:-}" == "yes" ]; then + echo >&7 "$@" + fi +} + +function set_cmd { + if [ -n "$CMD" ]; then + echo "ERROR: more than one command given" + echo + usage 2 + fi + CMD=$1 +} + +unset VERBOSE + +while getopts :cf:htxvC: opt; do + case $opt in + c) + set_cmd "create" + ;; + f) + ARCHIVE=$OPTARG + ;; + h) + usage 0 + ;; + t) + set_cmd "list" + ;; + x) + set_cmd "extract" + ;; + v) + VERBOSE=yes + exec 7>&1 + ;; + C) + CDIR=$OPTARG + ;; + *) + echo >&2 "ERROR: invalid option -$OPTARG" + echo + usage 1 + ;; + esac +done + +# Remove processed options from arguments +shift $(( OPTIND - 1 )); + +if [ "${CMD:-}" == "" ]; then + echo >&2 "ERROR: no command given" + echo + usage 1 +elif [ "${ARCHIVE:-}" == "" ]; then + echo >&2 "ERROR: no archive name given" + echo + usage 1 +fi + +function list { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while read -r line; do + line_no=$(( line_no + 1 )) + if [ $size -gt 0 ]; then + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + echo "$path" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + echo "$path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + echo "$path -> ${BASH_REMATCH[1]}" + fi + done < "$ttar_file" +} + +function extract { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while IFS= read -r line; do + line_no=$(( line_no + 1 )) + local eof_without_newline + if [ "$size" -gt 0 ]; then + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes).
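+ # (sed's /g flag resumes matching after each replacement, so + # \([^\\]\)NULLBYTE cannot match the second of two adjacent + # NULLBYTE tokens in the same pass; applying the substitution + # twice catches runs of consecutive null bytes.)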
+ echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + if [ -e "$path" ] || [ -L "$path" ]; then + rm "$path" + fi + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + # Create file even if it is zero-length. + touch "$path" + vecho " $path" + elif [[ $line =~ ^Mode:\ (.*)$ ]]; then + mode=${BASH_REMATCH[1]} + chmod "$mode" "$path" + vecho "$mode" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + mkdir -p "$path" + vecho " $path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + ln -s "${BASH_REMATCH[1]}" "$path" + vecho " $path -> ${BASH_REMATCH[1]}" + elif [[ $line =~ ^# ]]; then + # Ignore comments between files + continue + else + echo >&2 "ERROR: Unknown keyword on line $line_no: $line" + exit 1 + fi + done < "$ttar_file" +} + +function div { + echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ + "- - - - - -" +} + +function get_mode { + local mfile=$1 + if [ -z "${STAT_OPTION:-}" ]; then + if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat + STAT_OPTION='-c' + STAT_FORMAT='%a' + else + # BSD stat + STAT_OPTION='-f' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' + fi + fi + stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" +} + +function _create { + shopt -s nullglob + local mode + local eof_without_newline + while (( "$#" )); do + file=$1 + if [ -L "$file" ]; then + echo "Path: $file" + symlinkTo=$(readlink "$file") + echo "SymlinkTo: $symlinkTo" + vecho " $file -> $symlinkTo" + div + elif [ -d "$file" ]; then + # Strip trailing slash (if there is one) + file=${file%/} + echo "Directory: $file" + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file/" + div + # Find all files and dirs, including hidden/dot files + for x in "$file/"{*,.[^.]*}; do + _create "$x" + done + elif [ -f "$file" ]; then + echo "Path: $file" + lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi + echo "Lines: $lines" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file" + div + else + echo >&2 "ERROR: file not found ($file in $(pwd))" + exit 2 + fi + shift + done +} + +function create { + ttar_file=$1 + shift + if [ -z "${1:-}" ]; then + echo >&2 "ERROR: missing arguments." 
+ echo + usage 1 + fi + if [ -e "$ttar_file" ]; then + rm "$ttar_file" + fi + exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" + _create "$@" +} + +test_environment + +if [ -n "${CDIR:-}" ]; then + if [[ "$ARCHIVE" != /* ]]; then + # Relative path: preserve the archive's location before changing + # directory + ARCHIVE="$(pwd)/$ARCHIVE" + fi + cd "$CDIR" +fi + +"$CMD" "$ARCHIVE" "$@"
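For reference, the list, extract, and _create functions above define a plain-text container format: files are stored as Path:/Lines:/content/Mode: records, directories as Directory:/Mode: records, and symlinks as Path:/SymlinkTo: records, with "# ttar - -" comment dividers between entries. A minimal sketch of an archive (hypothetical contents, shown for illustration only; intermediate Directory: records omitted for brevity):

    # Archive created by ttar -C collector/fixtures -c -f sys.ttar sys
    Directory: sys/fs/cgroup
    Mode: 755
    # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Path: sys/fs/cgroup/memory.max
    Lines: 1
    max
    Mode: 644
    # ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Assuming the sysfs fixtures ignored in .gitignore (collector/fixtures/sys) are tracked as a single archive named, say, collector/fixtures/sys.ttar (the archive name is a guess; it does not appear in this diff), they would be round-tripped with:

    ./ttar -C collector/fixtures -c -f collector/fixtures/sys.ttar sys
    ./ttar -C collector/fixtures -x -f collector/fixtures/sys.ttar

Keeping the fixture tree as one text file stays diff-friendly in git and avoids checking in thousands of tiny pseudo-files.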