diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml index a9ae5f3c240..66ed19e6db8 100644 --- a/.github/actions/delivery/action.yml +++ b/.github/actions/delivery/action.yml @@ -31,12 +31,12 @@ runs: - name: Build name for DEB shell: bash - if: ${{ inputs.distrib == 'bullseye' }} + if: ${{ contains(fromJSON('["bullseye", "bookworm", "jammy"]'), inputs.distrib) }} run: | echo "extfile=deb" >> $GITHUB_ENV - name: Use cache files - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.${{ env.extfile }} key: ${{ inputs.cache_key }} @@ -46,7 +46,7 @@ runs: JF_URL: https://centreon.jfrog.io JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} - - if: ${{ startsWith(inputs.distrib, 'el') }} + - if: ${{ env.extfile == 'rpm' }} name: Publish RPMs run: | FILES="*.${{ env.extfile }}" @@ -89,7 +89,7 @@ runs: done shell: bash - - if: ${{ inputs.distrib == 'bullseye' }} + - if: ${{ env.extfile == 'deb' }} name: Publish DEBs run: | FILES="*.${{ env.extfile }}" @@ -113,6 +113,12 @@ runs: echo "[DEBUG] - Version: $VERSION" - jf rt upload "$FILE" "apt-standard-${{ inputs.version }}-${{ inputs.stability }}/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" + if [[ "${{ inputs.distrib }}" == "jammy" ]]; then + REPO_PREFIX="ubuntu" + else + REPO_PREFIX="apt" + fi + + jf rt upload "$FILE" "${REPO_PREFIX}-standard-${{ inputs.version }}-${{ inputs.stability }}/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" done shell: bash diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index 3e12de73d08..b51c1ae496e 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -34,6 +34,9 @@ inputs: rpm_gpg_signing_passphrase: description: The rpm gpg signing passphrase required: true + stability: + description: "branch stability (stable, testing, unstable, canary)" + required: true runs: using: composite @@ -58,6 +61,11 @@ runs: export DIST=".${{ inputs.distrib }}" else export DIST="" + if [ "${{ inputs.stability }}" = "unstable" ] || [ "${{ inputs.stability }}" = "canary" ]; then + export RELEASE="$RELEASE~${{ inputs.distrib }}" + else + export RELEASE="1~${{ inputs.distrib }}" + fi fi export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" @@ -67,6 +75,9 @@ runs: for FILE in ${{ inputs.nfpm_file_pattern }}; do DIRNAME=$(dirname $FILE) BASENAME=$(basename $FILE) + if [ -f $DIRNAME/env/.env.${{ inputs.distrib }} ]; then + source $DIRNAME/env/.env.${{ inputs.distrib }} + fi cd $DIRNAME sed -i "s/@COMMIT_HASH@/${{ inputs.commit_hash }}/g" $BASENAME nfpm package --config $BASENAME --packager ${{ inputs.package_extension }} @@ -76,7 +87,7 @@ runs: shell: bash - name: Cache packages - uses: actions/cache/save@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.${{ inputs.package_extension }} key: ${{ inputs.cache_key }} diff --git a/.github/actions/promote-to-stable/action.yml b/.github/actions/promote-to-stable/action.yml index f61b16d27dc..dfb7f6af133 100644 --- a/.github/actions/promote-to-stable/action.yml +++ b/.github/actions/promote-to-stable/action.yml @@ -22,6 +22,9 @@ inputs: repository_name: description: "The repository name" required: true + github_base_ref: + description: "Release base ref" + required: true runs: using: "composite" @@ -41,7 +44,7 @@ runs: # Cloud specific promote # 
delivery by default to onprem, override to internal if base branch is master - if [[ $GITHUB_BASE_REF == "master" ]]; then + if [[ ${{ inputs.github_base_ref }} == "master" ]]; then ROOT_REPO_PATH="rpm-standard-internal" else ROOT_REPO_PATH="rpm-standard" @@ -75,7 +78,7 @@ runs: shell: bash - name: Promote DEB packages to stable - if: ${{ startsWith(inputs.distrib, 'bullseye') }} + if: ${{ contains(fromJSON('["bullseye", "bookworm"]'), inputs.distrib) }} run: | echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Minor version: ${{ inputs.minor_version }}" diff --git a/.github/docker/Dockerfile.centreon-collect-alma8 b/.github/docker/Dockerfile.centreon-collect-alma8 index 5e09d3a516b..6fbfc962d31 100644 --- a/.github/docker/Dockerfile.centreon-collect-alma8 +++ b/.github/docker/Dockerfile.centreon-collect-alma8 @@ -2,7 +2,7 @@ ARG REGISTRY_URL FROM ${REGISTRY_URL}/almalinux:8 -RUN <> /etc/yum.conf && \ @@ -55,6 +55,8 @@ dnf install -y cmake \ dnf update libarchive +dnf clean all + pip3 install conan==1.61.0 --prefix=/usr --upgrade rm -rf ~/.conan/profiles/default @@ -62,7 +64,7 @@ EOF COPY conanfile.txt . -RUN <> /etc/yum.conf @@ -48,6 +48,8 @@ dnf --best install -y cmake \ nfpm \ sudo +dnf clean all + pip3 install conan==1.61.0 --prefix=/usr --upgrade rm -rf ~/.conan/profiles/default @@ -55,7 +57,7 @@ EOF COPY conanfile.txt . -RUN <> /etc/yum.conf @@ -38,6 +38,8 @@ dnf --best install -y \ psmisc \ sudo +dnf clean all + echo "install robot and dependencies" pip3 install -U robotframework robotframework-databaselibrary robotframework-httpctrl robotframework-examples pymysql python-dateutil psutil diff --git a/.github/docker/Dockerfile.centreon-collect-debian-bookworm b/.github/docker/Dockerfile.centreon-collect-debian-bookworm new file mode 100644 index 00000000000..1a3fea0b1e3 --- /dev/null +++ b/.github/docker/Dockerfile.centreon-collect-debian-bookworm @@ -0,0 +1,59 @@ +ARG REGISTRY_URL + +FROM ${REGISTRY_URL}/debian:bookworm + +RUN bash -e <> /etc/yum.conf @@ -48,6 +48,8 @@ dnf --best install -y cmake \ nfpm \ sudo +dnf clean all + pip3 install conan==1.61.0 --prefix=/usr --upgrade rm -rf ~/.conan/profiles/default @@ -55,7 +57,7 @@ EOF COPY conanfile.txt . -RUN <> /etc/yum.conf @@ -38,6 +38,8 @@ dnf --best install -y gcc \ sudo \ iproute +dnf clean all + echo "install robot and dependencies" pip3 install -U robotframework robotframework-databaselibrary robotframework-httpctrl robotframework-examples pymysql python-dateutil psutil diff --git a/.github/docker/Dockerfile.centreon-collect-ubuntu-jammy b/.github/docker/Dockerfile.centreon-collect-ubuntu-jammy new file mode 100644 index 00000000000..8a85e5f7f65 --- /dev/null +++ b/.github/docker/Dockerfile.centreon-collect-ubuntu-jammy @@ -0,0 +1,59 @@ +ARG REGISTRY_URL + +FROM ${REGISTRY_URL}/ubuntu:jammy + +RUN bash -e < /tmp/centreon.sql @@ -58,7 +58,11 @@ cat resources/centreon.sql | sed "s/DBNameConf/centreon/g" > /tmp/centreon.sql mysql -u root_centreon -pcentreon < resources/centreon_storage.sql mysql -u root_centreon -pcentreon < /tmp/centreon.sql -cd tests +if [ $database_type == 'mysql' ]; then + killall -w mysqldtoto +else + killall -w mariadbd +fi if [ "$distrib" = "ALMALINUX" ]; then dnf groupinstall -y "Development Tools" @@ -68,28 +72,3 @@ else apt-get install -y build-essential apt-get install -y python3-dev fi - - -echo "########################## Install centreon collect ###########################" -cd .. -echo "Installation..." 
-if [ "$distrib" = "ALMALINUX" ]; then - dnf clean all - rm -f ./*-selinux-*.rpm # avoid to install selinux packages which are dependent to centreon-common-selinux - dnf install -y ./*.rpm -else - apt-get update -# apt-get install -y ./*.deb -fi - - -if [ $database_type == 'mysql' ]; then - killall -w mysqldtoto -else - killall -w mariadbd -fi - -cd .. - -\rm -rf * - diff --git a/.github/scripts/collect-unit-tests.sh b/.github/scripts/collect-unit-tests.sh index 6ccf25a4b2c..a9a95f8e173 100755 --- a/.github/scripts/collect-unit-tests.sh +++ b/.github/scripts/collect-unit-tests.sh @@ -7,7 +7,9 @@ mkdir /src/build cd /src/build/ conan install .. -s compiler.cppstd=17 -s compiler.libcxx=libstdc++11 --build=missing -if [ $(cat /etc/issue | awk '{print $1}') = "Debian" ] ; then + +OS_ID=$(cat /etc/issue | awk '{print $1}') +if [[ "$OS_ID" == "Debian" || "$OS_ID" == "Ubuntu" ]] ; then CXXFLAGS="-Wall -Wextra" cmake -G "Ninja" -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DWITH_CENTREON_CLIB_INCLUDE_DIR=../clib/inc/ -DWITH_CENTREON_CLIB_LIBRARIES=centreon-clib/libcentreon_clib.so -DCMAKE_BUILD_TYPE=Debug -DWITH_PREFIX=/usr -DWITH_PREFIX_BIN=/usr/sbin -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker -DWITH_PREFIX_CONF_BROKER=/etc/centreon-broker -DWITH_PREFIX_LIB_BROKER=/usr/lib64/nagios -DWITH_PREFIX_CONF_ENGINE=/etc/centreon-engine -DWITH_PREFIX_LIB_ENGINE=/usr/lib64/centreon-engine -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_RW_DIR=/var/lib/centreon-engine/rw -DWITH_VAR_DIR=/var/log/centreon-engine -DWITH_MODULE_SIMU=On -DNG=On .. else CXXFLAGS="-Wall -Wextra" cmake3 -G "Ninja" -DCMAKE_EXPORT_COMPILE_COMMANDS=On -DWITH_CENTREON_CLIB_INCLUDE_DIR=../clib/inc/ -DWITH_CENTREON_CLIB_LIBRARIES=centreon-clib/libcentreon_clib.so -DCMAKE_BUILD_TYPE=Debug -DWITH_PREFIX=/usr -DWITH_PREFIX_BIN=/usr/sbin -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_MODULES=/usr/share/centreon/lib/centreon-broker -DWITH_PREFIX_CONF_BROKER=/etc/centreon-broker -DWITH_PREFIX_LIB_BROKER=/usr/lib64/nagios -DWITH_PREFIX_CONF_ENGINE=/etc/centreon-engine -DWITH_PREFIX_LIB_ENGINE=/usr/lib64/centreon-engine -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_RW_DIR=/var/lib/centreon-engine/rw -DWITH_VAR_DIR=/var/log/centreon-engine -DWITH_MODULE_SIMU=On -DNG=On .. 
diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml index b339f222923..7c9ae6aaeae 100644 --- a/.github/workflows/centreon-collect.yml +++ b/.github/workflows/centreon-collect.yml @@ -54,7 +54,7 @@ jobs: strategy: fail-fast: false matrix: - image: [alma8, alma9, debian-bullseye] + image: [alma8, alma9, debian-bullseye, debian-bookworm] name: unit test ${{ matrix.image }} steps: @@ -85,6 +85,7 @@ jobs: img_version: ${{ needs.get-version.outputs.img_version }} release: ${{ needs.get-version.outputs.release }} commit_hash: ${{ github.sha }} + stability: ${{ needs.get-version.outputs.stability }} secrets: inherit deliver-sources: @@ -134,7 +135,7 @@ jobs: distrib: ${{ matrix.distrib }} version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: cache-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} + cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} stability: ${{ needs.get-version.outputs.stability }} deliver-deb: @@ -149,6 +150,10 @@ jobs: arch: amd64 - distrib: bullseye arch: arm64 + - distrib: bookworm + arch: amd64 + - distrib: jammy + arch: amd64 name: deliver ${{ matrix.distrib }} @@ -163,7 +168,7 @@ jobs: distrib: ${{ matrix.distrib }} version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: cache-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} + cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} stability: ${{ needs.get-version.outputs.stability }} promote: @@ -172,7 +177,7 @@ jobs: runs-on: [self-hosted, common] strategy: matrix: - distrib: [el8, el9, bullseye] + distrib: [el8, el9, bullseye, bookworm, jammy] steps: - name: Checkout sources @@ -188,3 +193,4 @@ jobs: minor_version: ${{ needs.get-version.outputs.patch }} stability: ${{ needs.get-version.outputs.stability }} repository_name: standard + github_base_ref: ${{ github.base_ref }} diff --git a/.github/workflows/docker-builder.yml b/.github/workflows/docker-builder.yml index 7fa2db996a0..6824ed50c02 100644 --- a/.github/workflows/docker-builder.yml +++ b/.github/workflows/docker-builder.yml @@ -55,6 +55,18 @@ jobs: dockerfile: centreon-collect-debian-bullseye-test image: centreon-collect-debian-bullseye-test tag: ${{ needs.get-version.outputs.test_img_version }} + - runner: collect + dockerfile: centreon-collect-debian-bookworm + image: centreon-collect-debian-bookworm + tag: ${{ needs.get-version.outputs.img_version }} + - runner: collect + dockerfile: centreon-collect-debian-bookworm-test + image: centreon-collect-debian-bookworm-test + tag: ${{ needs.get-version.outputs.test_img_version }} + - runner: collect + dockerfile: centreon-collect-ubuntu-jammy + image: centreon-collect-ubuntu-jammy + tag: ${{ needs.get-version.outputs.img_version }} - runner: collect-arm64 dockerfile: centreon-collect-debian-bullseye image: centreon-collect-debian-bullseye-arm64 @@ -86,10 +98,10 @@ jobs: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - uses: docker/setup-buildx-action@885d1462b80bc1c1c7f0b00334ad271f09369c55 # v2.10.0 + - uses: 
docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - name: Build image ${{ matrix.image }}:${{ matrix.tag }} - uses: docker/build-push-action@1104d471370f9806843c095c1db02b5a90c5f8b6 # v3.3.1 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: file: .github/docker/Dockerfile.${{ matrix.dockerfile }} context: . diff --git a/.github/workflows/libzmq.yml b/.github/workflows/libzmq.yml new file mode 100644 index 00000000000..4fa4309a73d --- /dev/null +++ b/.github/workflows/libzmq.yml @@ -0,0 +1,223 @@ +name: libzmq + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - '.github/workflows/libzmq.yml' + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - '.github/workflows/libzmq.yml' + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + + package-rpm: + needs: [get-version] + + strategy: + fail-fast: false + matrix: + include: + - image: packaging-nfpm-alma8 + distrib: el8 + arch: amd64 + - image: packaging-nfpm-alma9 + distrib: el9 + arch: amd64 + + runs-on: ubuntu-22.04 + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} + + steps: + - name: package rpm + run: | + dnf install -y wget rpmdevtools rpmlint epel-release + dnf config-manager --set-enabled crb || true # alma 9 + dnf config-manager --set-enabled powertools || true # alma 8 + dnf install -y asciidoc autoconf automake gcc gcc-c++ glib2-devel libbsd-devel libtool make rpm-build xmlto + + cd /github/home + wget -O - https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.5.tar.gz | tar zxvf - + mkdir -p /github/home/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} + cp libzmq-4.3.5/packaging/redhat/zeromq.spec /github/home/rpmbuild/SPECS/ + wget https://github.com/zeromq/libzmq/releases/download/v4.3.5/zeromq-4.3.5.tar.gz -O /github/home/rpmbuild/SOURCES/zeromq-4.3.5.tar.gz + rpmbuild -bb /github/home/rpmbuild/SPECS/zeromq.spec + cd - + + mv /github/home/rpmbuild/RPMS/x86_64/*.rpm ./ + rm -f zeromq-debugsource-*.rpm libzmq5-debuginfo-*.rpm + shell: bash + + - name: cache rpm + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + with: + path: ./*.rpm + key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} + + package-deb: + needs: [get-version] + + strategy: + fail-fast: false + matrix: + include: + - image: packaging-nfpm-bullseye + distrib: bullseye + runner: ubuntu-22.04 + arch: amd64 + - image: packaging-nfpm-bookworm + distrib: bookworm + runner: ubuntu-22.04 + arch: amd64 + - image: packaging-nfpm-jammy + distrib: jammy + runner: ubuntu-22.04 + arch: amd64 + - image: packaging-bullseye-arm64 + distrib: bullseye + runner: ["self-hosted", "collect-arm64"] + arch: arm64 + + runs-on: ${{ matrix.runner }} + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} ${{ matrix.arch }} + + steps: + - name: package deb + run: | + apt-get update + apt-get install -y debhelper 
dh-autoreconf dpkg-dev libkrb5-dev libnorm-dev libpgm-dev libsodium-dev libunwind8-dev libnss3-dev libgnutls28-dev libbsd-dev pkg-config asciidoc wget xmlto + wget -O - https://github.com/zeromq/libzmq/releases/download/v4.3.5/zeromq-4.3.5.tar.gz | tar zxvf - + + cd zeromq-4.3.5 + ./configure + make + make install + cd .. + + wget -O - https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.5.tar.gz | tar zxvf - + cd libzmq-4.3.5 + ln -s packaging/debian + sed -Ei 's/([0-9]+.[0-9]+.[0-9]+-[0-9]+.[0-9]+)/\1~${{ matrix.distrib }}/' debian/changelog + sed -Ei 's/UNRELEASED/${{ matrix.distrib }}/' debian/changelog + dpkg-buildpackage -us -uc -nc + cd .. + + rm -f libzmq5-dbg_*.deb + shell: bash + + - name: cache deb + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + with: + path: ./*.deb + key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} + + deliver-rpm: + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package-rpm] + environment: ${{ needs.get-version.outputs.environment }} + runs-on: [self-hosted, common] + strategy: + matrix: + include: + - distrib: el8 + arch: amd64 + - distrib: el9 + arch: amd64 + + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish RPM packages + uses: ./.github/actions/delivery + with: + module_name: libzmq + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-deb: + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package-deb] + environment: ${{ needs.get-version.outputs.environment }} + runs-on: [self-hosted, common] + strategy: + matrix: + include: + - distrib: bullseye + arch: amd64 + - distrib: bullseye + arch: arm64 + - distrib: bookworm + arch: amd64 + - distrib: jammy + arch: amd64 + + name: deliver ${{ matrix.distrib }} ${{ matrix.arch }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish DEB packages + uses: ./.github/actions/delivery + with: + module_name: libzmq + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} + stability: ${{ needs.get-version.outputs.stability }} + + promote: + needs: [get-version] + if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + runs-on: [self-hosted, common] + strategy: + matrix: + distrib: [el8, el9, bullseye, bookworm, jammy] + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Promote ${{ matrix.distrib }} to stable + uses: ./.github/actions/promote-to-stable + with: + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + module_name: libzmq + distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-version.outputs.version }} + minor_version: ${{ needs.get-version.outputs.patch }} + stability: ${{ 
needs.get-version.outputs.stability }} + repository_name: standard + github_base_ref: ${{ github.base_ref }} diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml index c3b7a82f152..621f2635e78 100644 --- a/.github/workflows/package-collect.yml +++ b/.github/workflows/package-collect.yml @@ -15,6 +15,9 @@ on: commit_hash: required: true type: string + stability: + required: true + type: string jobs: package: @@ -37,6 +40,16 @@ jobs: package_extension: deb runner: collect arch: amd64 + - image: centreon-collect-debian-bookworm + distrib: bookworm + package_extension: deb + runner: collect + arch: amd64 + - image: centreon-collect-ubuntu-jammy + distrib: jammy + package_extension: deb + runner: collect + arch: amd64 - image: centreon-collect-debian-bullseye-arm64 distrib: bullseye package_extension: deb @@ -163,10 +176,11 @@ jobs: release: ${{ inputs.release }} arch: ${{ matrix.arch }} commit_hash: ${{ inputs.commit_hash }} - cache_key: cache-${{ github.sha }}-${{ matrix.package_extension}}-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} + cache_key: ${{ github.run_id }}-${{ github.sha }}-${{ matrix.package_extension}}-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ inputs.stability }} - name: Cleaning not needed packages shell: bash @@ -175,7 +189,7 @@ jobs: # set condition to true if artifacts are needed - if: ${{ false }} name: Upload package artifacts - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 with: name: packages-${{ matrix.distrib }}-${{ matrix.arch }} path: ./*.${{ matrix.package_extension}} diff --git a/.github/workflows/release-collect.yml b/.github/workflows/release-collect.yml index f91fab7a47f..93f50c6fae1 100644 --- a/.github/workflows/release-collect.yml +++ b/.github/workflows/release-collect.yml @@ -10,6 +10,8 @@ on: - "[2-9][0-9].[0-9][0-9].x" paths: - "centreon-collect/**" + - "!centreon-collect/ci/**" + - "!centreon-collect/tests/**" workflow_dispatch: env: diff --git a/.github/workflows/robot-nightly.yml b/.github/workflows/robot-nightly.yml index 0816720af8d..13db94f259a 100644 --- a/.github/workflows/robot-nightly.yml +++ b/.github/workflows/robot-nightly.yml @@ -49,6 +49,7 @@ jobs: needs: [get-version] uses: ./.github/workflows/package-collect.yml with: + stability: ${{ needs.get-version.outputs.stability }} version: ${{ needs.get-version.outputs.version }}.${{ needs.get-version.outputs.patch }} img_version: ${{ needs.get-version.outputs.img_version }} release: ${{ needs.get-version.outputs.release }} @@ -81,8 +82,16 @@ jobs: package_extension: deb arch: arm64 database_type: mariadb - test_group_name: robot_test-mariadb-el9-arm64 + test_group_name: robot_test-mariadb-bullseye-arm64 tests_params: '{}' + # uncomment once bbdo-protobuf.robot test is fixed + # - distrib: bookworm + # image: centreon-collect-debian-bookworm-test + # package_extension: deb + # arch: amd64 + # database_type: mariadb + # test_group_name: robot_test-mariadb-bookworm-amd64 + # tests_params: '{}' - distrib: el9 image: centreon-collect-alma9-test package_extension: rpm @@ -100,7 +109,7 @@ jobs: image: ${{ matrix.image }} image_test: ${{ 
matrix.image }}:${{ needs.get-version.outputs.test_img_version }} image_version: ${{ needs.get-version.outputs.img_version }} - package_cache_key: cache-${{ github.sha }}-${{ matrix.package_extension}}-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} + package_cache_key: ${{ github.run_id }}-${{ github.sha }}-${{ matrix.package_extension }}-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} package_cache_path: ./*.${{ matrix.package_extension}} database_type: ${{ matrix.database_type }} tests_params: ${{matrix.tests_params}} @@ -131,7 +140,7 @@ jobs: distrib: ${{ matrix.distrib }} version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: cache-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-amd64-${{ github.head_ref || github.ref_name }} + cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-amd64-${{ github.head_ref || github.ref_name }} stability: ${{ needs.get-version.outputs.stability }} deliver-deb: @@ -143,6 +152,10 @@ jobs: include: - distrib: bullseye arch: amd64 + - distrib: bookworm + arch: amd64 + - distrib: jammy + arch: amd64 - distrib: bullseye arch: arm64 name: deliver ${{ matrix.distrib }} @@ -158,5 +171,5 @@ jobs: distrib: ${{ matrix.distrib }} version: ${{ needs.get-version.outputs.version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: cache-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} + cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} stability: ${{ needs.get-version.outputs.stability }} diff --git a/.github/workflows/robot-test.yml b/.github/workflows/robot-test.yml index 27d5bebf90a..e2c6c2d8d01 100644 --- a/.github/workflows/robot-test.yml +++ b/.github/workflows/robot-test.yml @@ -74,7 +74,7 @@ jobs: shell: bash - name: image to cache - uses: actions/cache/save@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: /tmp/${{inputs.image}} key: ${{inputs.image_test}} @@ -113,14 +113,14 @@ jobs: fetch-depth: 0 - name: Restore image - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: /tmp/${{inputs.image}} key: ${{inputs.image_test}} fail-on-cache-miss: true - name: Restore packages - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ${{ inputs.package_cache_path }} key: ${{ inputs.package_cache_key }} @@ -157,11 +157,20 @@ jobs: cp tests/output.xml reports/$FILE_PREFIX-output.xml fi + - name: Replace / with - in the feature path + id: feature-path + if: always() + run: | + feature_name="${{ matrix.feature }}" + feature_name_with_dash="${feature_name//\//-}" + echo "Modified Feature Name: $feature_name_with_dash" + echo "feature_name_with_dash=$feature_name_with_dash" >> $GITHUB_OUTPUT + - name: Upload Test Results if: ${{ failure() }} - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 with: - name: reports-${{inputs.test_group_name}} + name: 
reports-${{inputs.test_group_name}}-${{ steps.feature-path.outputs.feature_name_with_dash }} path: reports retention-days: 1 @@ -174,12 +183,59 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Download Artifacts - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 with: - name: reports-${{inputs.test_group_name}} + pattern: reports-${{inputs.test_group_name}}-* path: reports + merge-multiple: true + + - name: Upload the regrouped artifact + uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + with: + name: reports-${{inputs.test_group_name}} + path: reports/ + retention-days: 1 + + - name: Delete separated artifacts + run: | + artifact_pattern="reports-${{inputs.test_group_name}}-" + TOKEN=${{ secrets.GITHUB_TOKEN }} + artifact_exist=true + while [ "$artifact_exist" = true ] ;do + artifact_exist=false + artifacts_response=$(curl -L \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $TOKEN" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "https://api.github.com/repos/${{ github.repository }}/actions/artifacts?per_page=100") + artifacts=$(echo $artifacts_response | jq -c '.artifacts[]') + echo "Those are the artifacts : $artifacts" + while read row; do + artifact_name=$(echo "$row" | jq -r '.name') + if [[ "$artifact_name" =~ ^.*"$artifact_pattern".* ]]; then + artifact_exist=true + echo "Deleting : $artifact_name" + artifact_id=$(echo "$row" | jq -r '.id') + curl -L \ + -X DELETE \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $TOKEN" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + "https://api.github.com/repos/${{ github.repository }}/actions/artifacts/${artifact_id}" + fi + done <<< "$artifacts" + done + echo "End of Deleting" + shell: bash + + # setup-python v5.0.0 relies on node20 which is not supported by el7 distributions + - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 + if: ${{ inputs.distrib == 'el7'}} + with: + python-version: '3.10' - - uses: actions/setup-python@v4 + - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + if: ${{ inputs.distrib != 'el7' }} with: python-version: '3.10' diff --git a/.github/workflows/veracode-analysis.yml b/.github/workflows/veracode-analysis.yml index 43952f3436c..b7e638e1e4a 100644 --- a/.github/workflows/veracode-analysis.yml +++ b/.github/workflows/veracode-analysis.yml @@ -27,9 +27,33 @@ on: required: true jobs: + routing: + name: Check before analysis + runs-on: ubuntu-latest + outputs: + development_stage: ${{ steps.routing-mode.outputs.development_stage }} + + steps: + - name: Set routing mode + id: routing-mode + run: | + DEVELOPMENT_STAGE="Development" + + ALLOWED_BRANCHES=(develop master dev-${{ inputs.major_version }}.x ${{ inputs.major_version }}.x) + for BRANCH in "${ALLOWED_BRANCHES[@]}"; do + if [[ "${{ github.ref_name }}" == "$BRANCH" ]] && [[ '${{ github.event_name }}' != 'pull_request' ]]; then + DEVELOPMENT_STAGE="Release" + fi + done + + echo "development_stage=$DEVELOPMENT_STAGE" >> $GITHUB_OUTPUT + cat $GITHUB_OUTPUT + build: name: Binary preparation runs-on: [self-hosted, collect] + needs: [routing] + if: needs.routing.outputs.development_stage != 'Development' container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/centreon-collect-alma9:${{ inputs.img_version }} credentials: @@ -110,18 +134,20 @@ jobs: tar cvzf "${{ 
inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" build - name: Cache - uses: actions/cache/save@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" - sandbox-scan: - needs: [build] + policy-scan: name: Sandbox scan + needs: [routing, build] + if: needs.routing.outputs.development_stage != 'Development' runs-on: ubuntu-latest steps: - name: Promote latest scan + # only develop will be promoted to policy scan if: github.ref_name == 'develop' env: VERACODE_API_ID: "${{ secrets.veracode_api_id }}" @@ -129,7 +155,7 @@ jobs: # Action forked as API calls hardcoded '.com' route uses: sc979/veracode-sandboxes-helper@cf67241c27cbe6405ad8705111121ece9a48c4ff # v0.2 - # Promote should not fail if sandbox was not found. + # Promote should not fail to trigger following sandbox scan. continue-on-error: true with: activity: "promote-latest-scan" @@ -138,7 +164,7 @@ jobs: delete-on-promote: false - name: Get build binary - uses: actions/cache/restore@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" diff --git a/CMakeLists.txt b/CMakeLists.txt index 273ca6aedef..4953ec636ce 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -37,6 +37,8 @@ if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_ID FATAL_ERROR "You can build broker with g++ or clang++. CMake will exit.") endif() +option(WITH_MALLOC_TRACE "compile centreon-malloc-trace library." OFF) + # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -stdlib=libc++") # set(CMAKE_CXX_COMPILER "clang++") add_definitions("-D_GLIBCXX_USE_CXX11_ABI=1") @@ -190,6 +192,11 @@ add_subdirectory(engine) add_subdirectory(connectors) add_subdirectory(ccc) +if (WITH_MALLOC_TRACE) + add_subdirectory(malloc-trace) +endif() + + add_custom_target(test-broker COMMAND tests/ut_broker) add_custom_target(test-engine COMMAND tests/ut_engine) add_custom_target(test-clib COMMAND tests/ut_clib) diff --git a/broker/CMakeLists.txt b/broker/CMakeLists.txt index 59b6a56a106..ca52139b57a 100644 --- a/broker/CMakeLists.txt +++ b/broker/CMakeLists.txt @@ -491,7 +491,8 @@ target_link_libraries( ${CONAN_LIBS_PROTOBUF} "-Wl,--no-whole-archive" CONAN_PKG::spdlog - CONAN_PKG::grpc) + CONAN_PKG::grpc + stdc++fs) # Centreon Broker Watchdog option(WITH_CBWD "Build centreon broker watchdog." ON) diff --git a/broker/core/multiplexing/src/muxer.cc b/broker/core/multiplexing/src/muxer.cc index 1e02aab8ee4..4ac2c1e7241 100644 --- a/broker/core/multiplexing/src/muxer.cc +++ b/broker/core/multiplexing/src/muxer.cc @@ -1,20 +1,20 @@ /** -* Copyright 2009-2013,2015-2017,2019-2021 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2009-2013,2015-2017,2019-2021 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/multiplexing/muxer.hh" @@ -195,15 +195,19 @@ std::shared_ptr muxer::create(std::string name, * Destructor. */ muxer::~muxer() noexcept { - stats::center::instance().unregister_muxer(_name); unsubscribe(); - std::lock_guard lock(_mutex); - SPDLOG_LOGGER_INFO(log_v2::core(), - "Destroying muxer {}: number of events in the queue: {}", - _name, _events_size); - _clean(); + { + std::lock_guard lock(_mutex); + SPDLOG_LOGGER_INFO(log_v2::core(), + "Destroying muxer {}: number of events in the queue: {}", + _name, _events_size); + _clean(); + } DEBUG( fmt::format("DESTRUCTOR muxer {:p} {}", static_cast(this), _name)); + // caution, unregister_muxer must be the last center method called at muxer + // destruction to avoid re create a muxer stat entry + stats::center::instance().unregister_muxer(_name); } /** diff --git a/broker/core/sql/src/mysql_connection.cc b/broker/core/sql/src/mysql_connection.cc index db18b4e50f3..23a38f455a8 100644 --- a/broker/core/sql/src/mysql_connection.cc +++ b/broker/core/sql/src/mysql_connection.cc @@ -848,6 +848,7 @@ void mysql_connection::_run() { ::mysql_error(_conn))); _state = finished; _start_condition.notify_all(); + _clear_connection(); return; } _last_access = std::time(nullptr); diff --git a/broker/lua/inc/com/centreon/broker/lua/broker_event.hh b/broker/lua/inc/com/centreon/broker/lua/broker_event.hh index 7c5b0d07bac..16f94daa4e3 100644 --- a/broker/lua/inc/com/centreon/broker/lua/broker_event.hh +++ b/broker/lua/inc/com/centreon/broker/lua/broker_event.hh @@ -48,13 +48,25 @@ namespace lua { * */ class broker_event { + struct gc_info { + gc_info() : _broker_event_cpt(0), _last_full_gc(time(nullptr)) {} + + unsigned _broker_event_cpt; + time_t _last_full_gc; + }; + + static std::map _gc_info; + static std::mutex _gc_info_m; + + static int l_broker_event_destructor(lua_State* L); public: static void broker_event_reg(lua_State* L); static void create(lua_State* L, std::shared_ptr e); static void create_as_table(lua_State* L, const io::data& e); + static void lua_close(const lua_State* L); }; } // namespace lua -} +} // namespace com::centreon::broker #endif // !CCB_LUA_BROKER_EVENT_HH diff --git a/broker/lua/inc/com/centreon/broker/lua/luabinding.hh b/broker/lua/inc/com/centreon/broker/lua/luabinding.hh index 9f3e7ba0aeb..74b5a048ce0 100644 --- a/broker/lua/inc/com/centreon/broker/lua/luabinding.hh +++ b/broker/lua/inc/com/centreon/broker/lua/luabinding.hh @@ -1,20 +1,20 @@ -/* -** Copyright 2018 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2018-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #ifndef CCB_LUA_LUABINDING_HH #define CCB_LUA_LUABINDING_HH @@ -112,7 +112,7 @@ class luabinding { macro_cache& cache); luabinding(luabinding const&) = delete; luabinding& operator=(luabinding const&) = delete; - ~luabinding() noexcept = default; + ~luabinding() noexcept; bool has_filter() const noexcept; int32_t write(std::shared_ptr const& data) noexcept; bool has_flush() const noexcept; @@ -125,6 +125,6 @@ void push_event_as_table(lua_State* L, io::data const& d); } // namespace lua -} +} // namespace com::centreon::broker #endif // !CCB_LUA_LUA_HH diff --git a/broker/lua/src/broker_event.cc b/broker/lua/src/broker_event.cc index 2c5644fde0c..84f639f2525 100644 --- a/broker/lua/src/broker_event.cc +++ b/broker/lua/src/broker_event.cc @@ -22,6 +22,7 @@ #include "com/centreon/broker/io/data.hh" #include "com/centreon/broker/io/protobuf.hh" #include "com/centreon/broker/mapping/entry.hh" +#include "com/centreon/broker/multiplexing/muxer.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::broker; @@ -32,6 +33,8 @@ static void _write_item(lua_State* L, const google::protobuf::Message* p, const google::protobuf::FieldDescriptor* f); +std::map broker_event::_gc_info; +std::mutex broker_event::_gc_info_m; /** * The Lua broker_event constructor * @@ -48,6 +51,32 @@ void broker_event::create(lua_State* L, std::shared_ptr e) { luaL_getmetatable(L, "broker_event"); lua_setmetatable(L, -2); + bool have_to_gc_collect = false; + { + std::lock_guard l(_gc_info_m); + + /*In V2, lua stores only a userdata that contains a shared_ptr of event + * (16 bytes). So garbage collector don't see amount of memory used by + * events. 
+ * So we need to call garbage collector ourselves to reduce memory + * consumption + * So we call at least gc every minute or + * at most every 10s if lua own more than + * com::centreon::broker::multiplexing::muxer::event_queue_max_size() events + * */ + time_t now = time(nullptr); + gc_info& gc_inf = _gc_info[L]; + if ((++gc_inf._broker_event_cpt > com::centreon::broker::multiplexing:: + muxer::event_queue_max_size() && + gc_inf._last_full_gc + 10 < now) || + (gc_inf._last_full_gc + 60 < now)) { + gc_inf._last_full_gc = now; + have_to_gc_collect = true; + } + } + if (have_to_gc_collect) { + lua_gc(L, LUA_GCCOLLECT, 0); + } } static void _message_to_table(lua_State* L, @@ -344,12 +373,18 @@ void broker_event::create_as_table(lua_State* L, const io::data& d) { * * @return 0 */ -static int l_broker_event_destructor(lua_State* L) { +int broker_event::l_broker_event_destructor(lua_State* L) { void* ptr = luaL_checkudata(L, 1, "broker_event"); if (ptr) { auto event = static_cast*>(ptr); event->reset(); + std::lock_guard l(_gc_info_m); + + gc_info& gc_inf = _gc_info[L]; + if (gc_inf._broker_event_cpt > 0) { + --gc_inf._broker_event_cpt; + } } return 0; } @@ -758,3 +793,13 @@ void broker_event::broker_event_reg(lua_State* L) { lua_setglobal(L, name); } + +/** + * @brief when a lua_State is closed we clean _gc_info + * + * @param The Lua interpreter as a lua_State* + */ +void broker_event::lua_close(const lua_State* L) { + std::lock_guard l(_gc_info_m); + _gc_info.erase(L); +} diff --git a/broker/lua/src/luabinding.cc b/broker/lua/src/luabinding.cc index 6fc6250830d..c47d7ac3a96 100644 --- a/broker/lua/src/luabinding.cc +++ b/broker/lua/src/luabinding.cc @@ -1,22 +1,23 @@ /** -* Copyright 2018 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2018-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include + #include #include "com/centreon/broker/log_v2.hh" @@ -40,6 +41,11 @@ static int l_pairs(lua_State* L) { return 3; } #endif + +#define RETURN_AND_POP(val) \ + lua_pop(_L, lua_gettop(_L)); \ + return val + /** * Constructor. * @@ -73,10 +79,19 @@ luabinding::luabinding(std::string const& lua_script, } } +/** + * @brief Destructor of luabinding. 
+ */ +luabinding::~luabinding() noexcept { + stop(); +} + int32_t luabinding::stop() { - int32_t retval = flush(); + int32_t retval = 0; if (_L) { + retval = flush(); lua_close(_L); + broker_event::lua_close(_L); _L = nullptr; } return retval; @@ -166,9 +181,30 @@ void luabinding::_load_script(const std::string& lua_script) { "lua: filter() global function is missing, the write() function will " "be called for each event"); _filter = false; - } else + } else { _filter = true; - lua_pop(_L, 1); + /* Just a call with cat = 1 and elem = 2 of filter to check its return. + * It is not sufficent but checks almost all cases. */ + lua_pushinteger(_L, 1); + lua_pushinteger(_L, 2); + if (lua_pcall(_L, 2, 1, 0) != 0) { + const char* ret = lua_tostring(_L, -1); + if (ret) + log_v2::lua()->error( + "lua: The filter() function doesn't work correctly: {}", ret); + else + log_v2::lua()->error( + "lua: The filter() function doesn't work correctly"); + _filter = false; + } else { + if (!lua_isboolean(_L, -1)) { + log_v2::lua()->error( + "lua: The filter() function should return a boolean."); + _filter = false; + } + } + } + lua_pop(_L, lua_gettop(_L)); // Checking for flush() availability: this function is optional lua_getglobal(_L, "flush"); @@ -231,6 +267,7 @@ void luabinding::_load_script(const std::string& lua_script) { // Registers the broker cache broker_cache::broker_cache_reg(_L, _cache, _broker_api_version); + assert(lua_gettop(_L) == 0); } /** @@ -330,18 +367,18 @@ int luabinding::write(std::shared_ptr const& data) noexcept { SPDLOG_LOGGER_ERROR( log_v2::lua(), "lua: unknown error while running function `filter()'"); - return 0; + RETURN_AND_POP(0); } if (!lua_isboolean(_L, -1)) { SPDLOG_LOGGER_ERROR(log_v2::lua(), "lua: `filter' must return a boolean"); - return 0; + RETURN_AND_POP(0); } execute_write = lua_toboolean(_L, -1); SPDLOG_LOGGER_DEBUG(log_v2::lua(), "lua: `filter' returned {}", (execute_write ? "true" : "false")); - lua_pop(_L, -1); + lua_pop(_L, lua_gettop(_L)); } if (!execute_write) @@ -371,15 +408,14 @@ int luabinding::write(std::shared_ptr const& data) noexcept { else SPDLOG_LOGGER_ERROR(log_v2::lua(), "lua: unknown error running function `write'"); - return 0; + RETURN_AND_POP(0); } if (!lua_isboolean(_L, -1)) { SPDLOG_LOGGER_ERROR(log_v2::lua(), "lua: `write' must return a boolean"); - return 0; + RETURN_AND_POP(0); } int acknowledge = lua_toboolean(_L, -1); - lua_pop(_L, -1); // We have to acknowledge rejected events by the filter. It is only possible // when an acknowledgement is sent by the write function. 
@@ -387,7 +423,7 @@ int luabinding::write(std::shared_ptr const& data) noexcept { retval = _total; _total = 0; } - return retval; + RETURN_AND_POP(retval); } /** @@ -417,19 +453,18 @@ int32_t luabinding::flush() noexcept { else SPDLOG_LOGGER_ERROR(log_v2::lua(), "lua: unknown error running function `flush'"); - return 0; + RETURN_AND_POP(0); } if (!lua_isboolean(_L, -1)) { SPDLOG_LOGGER_ERROR(log_v2::lua(), "lua: `flush' must return a boolean"); - return 0; + RETURN_AND_POP(0); } bool acknowledge = lua_toboolean(_L, -1); - lua_pop(_L, -1); int32_t retval = 0; if (acknowledge) { retval = _total; _total = 0; } - return retval; + RETURN_AND_POP(retval); } diff --git a/broker/lua/test/lua.cc b/broker/lua/test/lua.cc index 3153f45e2b5..4a087abf5e6 100644 --- a/broker/lua/test/lua.cc +++ b/broker/lua/test/lua.cc @@ -1,20 +1,20 @@ /** -* Copyright 2018-2022 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2018-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include #include @@ -4764,3 +4764,69 @@ TEST_F(LuaTest, JsonDecodeNull) { RemoveFile(filename); RemoveFile("/tmp/log"); } + +TEST_F(LuaTest, BadLua) { + config::applier::modules modules; + modules.load_file("./lib/10-neb.so"); + std::map conf; + std::string filename("/tmp/bad.lua"); + CreateScript(filename, + "function init(conf)\n" + " broker_log:set_parameters(3, '/tmp/log')\n" + "end\n\n" + "function write(d)\n" + " bad_function()\n" + " return true\n" + "end\n"); + auto binding{std::make_unique(filename, conf, *_cache)}; + auto s{std::make_unique()}; + s->host_id = 12; + s->service_id = 18; + s->output = "Bonjour"; + std::shared_ptr svc(s.release()); + ASSERT_EQ(binding->write(svc), 0); + RemoveFile(filename); +} + +// When a lua script that contains a bad filter() function. "Bad" here +// is because the return value is not a boolean. +// Then has_filter() returns false and there is no leak on the stack. +// (the leak is seen when compiled with -g). 
+TEST_F(LuaTest, WithBadFilter1) { + std::map conf; + std::string filename("/tmp/with_bad_filter.lua"); + CreateScript(filename, + "function init()\n" + "end\n" + "function filter(c, e)\n" + " return \"foo\", \"bar\"\n" + "end\n" + "function write(d)\n" + " return 1\n" + "end"); + auto bb{std::make_unique(filename, conf, *_cache)}; + ASSERT_FALSE(bb->has_filter()); + RemoveFile(filename); +} + +// When a lua script that contains a bad filter() function. "Bad" here +// because the filter calls a function that doesn't exist. +// Then has_filter() returns false and there is no leak on the stack. +// (the leak is seen when compiled with -g). +TEST_F(LuaTest, WithBadFilter2) { + std::map conf; + std::string filename("/tmp/with_bad_filter.lua"); + CreateScript(filename, + "function init()\n" + "end\n" + "function filter(c, e)\n" + " unexisting_function()\n" + " return \"foo\", \"bar\"\n" + "end\n" + "function write(d)\n" + " return 1\n" + "end"); + auto bb{std::make_unique(filename, conf, *_cache)}; + ASSERT_FALSE(bb->has_filter()); + RemoveFile(filename); +} diff --git a/broker/rrd/src/creator.cc b/broker/rrd/src/creator.cc index 4656ca6ddec..231336df3e4 100644 --- a/broker/rrd/src/creator.cc +++ b/broker/rrd/src/creator.cc @@ -27,6 +27,7 @@ #include #include #include +#include #include "bbdo/storage/metric.hh" #include "com/centreon/broker/log_v2.hh" @@ -45,7 +46,8 @@ using namespace com::centreon::broker::rrd; */ creator::creator(std::string const& tmpl_path, uint32_t cache_size) : _cache_size(cache_size), _tmpl_path(tmpl_path) { - log_v2::rrd()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::rrd(), "RRD: file creator will maintain at most {} templates in '{}'", _cache_size, _tmpl_path); } @@ -105,7 +107,8 @@ void creator::create(std::string const& filename, if (it != _fds.end() && it->first.is_length_step_type_equal(info) && it->first.from <= from) { _duplicate(filename, it->second); - log_v2::rrd()->debug("reuse {} for {}", it->second.path, filename); + SPDLOG_LOGGER_DEBUG(log_v2::rrd(), "reuse {} for {}", it->second.path, + filename); } // Not in the cache, but we have enough space in the cache. // Create new entry. @@ -263,15 +266,28 @@ void creator::_open(std::string const& filename, // Debug message. argv[argc] = nullptr; - log_v2::rrd()->debug("RRD: opening file '{}' ({}, {}, {}, step 1, from {})", - filename, argv[0], argv[1], - (argv[2] ? argv[2] : "(null)"), from); + SPDLOG_LOGGER_DEBUG( + log_v2::rrd(), "RRD: create file '{}' ({}, {}, {}, step 1, from {})", + filename, argv[0], argv[1], (argv[2] ? argv[2] : "(null)"), from); // Create RRD file. 
rrd_clear_error(); if (rrd_create_r(filename.c_str(), 1, from, argc, argv)) throw exceptions::open("RRD: could not create file '{}: {}", filename, rrd_get_error()); + + // by default rrd_create_r create rw-r----- files group write is mandatory + // for rrdcached + std::error_code err; + std::filesystem::permissions( + filename, + std::filesystem::perms::group_read | std::filesystem::perms::group_write, + std::filesystem::perm_options::add, err); + if (err) { + SPDLOG_LOGGER_ERROR(log_v2::rrd(), + "RRD: fail to add access rights (660) to {}: {}", + filename, err.message()); + } } /** diff --git a/broker/tcp/src/tcp_connection.cc b/broker/tcp/src/tcp_connection.cc index d5048c442ec..e21e3fcea90 100644 --- a/broker/tcp/src/tcp_connection.cc +++ b/broker/tcp/src/tcp_connection.cc @@ -1,20 +1,20 @@ /** -* Copyright 2020-2021 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2020-2021 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/tcp/tcp_connection.hh" #include "com/centreon/broker/exceptions/connection_closed.hh" diff --git a/broker/test/CMakeLists.txt b/broker/test/CMakeLists.txt index ceeaf93f23a..50afdaa5b6c 100644 --- a/broker/test/CMakeLists.txt +++ b/broker/test/CMakeLists.txt @@ -144,7 +144,8 @@ target_link_libraries( CONAN_PKG::gtest CONAN_PKG::mariadb-connector-c CONAN_PKG::openssl - CONAN_PKG::grpc) + CONAN_PKG::grpc + stdc++fs) add_dependencies( ut_broker diff --git a/broker/tls/inc/com/centreon/broker/tls/params.hh b/broker/tls/inc/com/centreon/broker/tls/params.hh index d409f6d8550..cb7135b7623 100644 --- a/broker/tls/inc/com/centreon/broker/tls/params.hh +++ b/broker/tls/inc/com/centreon/broker/tls/params.hh @@ -21,10 +21,8 @@ #include +namespace com::centreon::broker::tls { -namespace com::centreon::broker { - -namespace tls { /** * @class params params.hh "com/centreon/broker/tls/params.hh" * @brief Configure parameters of a TLS connection (either incoming @@ -59,17 +57,16 @@ class params { params(params const& p) = delete; params& operator=(params const& p) = delete; virtual ~params(); - void apply(gnutls_session_t session); + void apply(gnutls_session_t session) const; void load(); void reset(); void set_cert(std::string const& cert, std::string const& key); void set_compression(bool compress = false); void set_trusted_ca(std::string const& ca_cert); void set_tls_hostname(std::string const& tls_hostname); - void validate_cert(gnutls_session_t session); + void validate_cert(gnutls_session_t session) const; }; -} // namespace tls -} +} // namespace com::centreon::broker::tls #endif // !CCB_TLS_PARAMS_HH diff --git a/broker/tls/inc/com/centreon/broker/tls/stream.hh b/broker/tls/inc/com/centreon/broker/tls/stream.hh index f4a28a311fb..8b59b375ae9 100644 --- a/broker/tls/inc/com/centreon/broker/tls/stream.hh +++ b/broker/tls/inc/com/centreon/broker/tls/stream.hh @@ -22,10 +22,10 @@ #include #include "com/centreon/broker/io/stream.hh" +#include "com/centreon/broker/tls/params.hh" -namespace com::centreon::broker { +namespace com::centreon::broker::tls { -namespace tls { /** * @class stream stream.hh "com/centreon/broker/tls/stream.hh" * @brief TLS wrapper of an underlying stream. @@ -38,11 +38,14 @@ namespace tls { class stream : public io::stream { std::vector _buffer; time_t _deadline; - gnutls_session_t* _session; + gnutls_session_t _session; public: - stream(gnutls_session_t* session); + stream(unsigned int session_flags); ~stream(); + + void init(const params& param); + stream(const stream&) = delete; stream& operator=(const stream&) = delete; bool read(std::shared_ptr& d, time_t deadline) override; @@ -51,8 +54,7 @@ class stream : public io::stream { int32_t stop() override { return 0; } long long write_encrypted(void const* buffer, long long size); }; -} // namespace tls -} +} // namespace com::centreon::broker::tls #endif // !CCB_TLS_STREAM_HH diff --git a/broker/tls/src/acceptor.cc b/broker/tls/src/acceptor.cc index 80c631b2aea..18455596d13 100644 --- a/broker/tls/src/acceptor.cc +++ b/broker/tls/src/acceptor.cc @@ -1,28 +1,26 @@ /** -* Copyright 2009-2013, 2021 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. 
-* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2009-2013, 2021 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/tls/acceptor.hh" #include #include "com/centreon/broker/log_v2.hh" -#include "com/centreon/broker/tls/internal.hh" -#include "com/centreon/broker/tls/params.hh" #include "com/centreon/broker/tls/stream.hh" #include "com/centreon/exceptions/msg_fmt.hh" @@ -87,7 +85,7 @@ std::shared_ptr acceptor::open() { */ std::shared_ptr acceptor::open( const std::shared_ptr& lower) { - std::shared_ptr u; + std::shared_ptr u; if (lower) { int ret; @@ -98,61 +96,10 @@ std::shared_ptr acceptor::open( p.set_tls_hostname(_tls_hostname); p.load(); - gnutls_session_t* session(new gnutls_session_t); - try { - // Initialize the TLS session - log_v2::tls()->debug("TLS: initializing session"); - // GNUTLS_NONBLOCK was introduced in gnutls 2.99.3. -#ifdef GNUTLS_NONBLOCK - ret = gnutls_init(session, GNUTLS_SERVER | GNUTLS_NONBLOCK); -#else - ret = gnutls_init(session, GNUTLS_SERVER); -#endif // GNUTLS_NONBLOCK - if (ret != GNUTLS_E_SUCCESS) { - log_v2::tls()->error("TLS: cannot initialize session: {}", - gnutls_strerror(ret)); - throw msg_fmt("TLS: cannot initialize session: {}", - gnutls_strerror(ret)); - } - - // Apply TLS parameters. - p.apply(*session); - - // Create stream object. - u.reset(new stream(session)); - } catch (...) { - gnutls_deinit(*session); - delete session; - throw; - } + // Create stream object. + u = std::make_shared(GNUTLS_SERVER | GNUTLS_NONBLOCK); u->set_substream(lower); - - // Bind the TLS session with the stream from the lower layer. -#if GNUTLS_VERSION_NUMBER < 0x020C00 - gnutls_transport_set_lowat(*session, 0); -#endif // GNU TLS < 2.12.0 - gnutls_transport_set_pull_function(*session, pull_helper); - gnutls_transport_set_push_function(*session, push_helper); - gnutls_transport_set_ptr(*session, u.get()); - - // Perform the TLS handshake. 
- log_v2::tls()->debug("TLS: performing handshake"); - do { - ret = gnutls_handshake(*session); - } while (GNUTLS_E_AGAIN == ret || GNUTLS_E_INTERRUPTED == ret); - if (ret != GNUTLS_E_SUCCESS) { - log_v2::tls()->error("TLS: handshake failed: {}", gnutls_strerror(ret)); - throw msg_fmt("TLS: handshake failed: {} ", gnutls_strerror(ret)); - } - log_v2::tls()->debug("TLS: successful handshake"); - gnutls_protocol_t prot = gnutls_protocol_get_version(*session); - gnutls_cipher_algorithm_t ciph = gnutls_cipher_get(*session); - log_v2::tls()->debug("TLS: protocol and cipher {} {} used", - gnutls_protocol_get_name(prot), - gnutls_cipher_get_name(ciph)); - - // Check certificate. - p.validate_cert(*session); + u->init(p); } return u; diff --git a/broker/tls/src/connector.cc b/broker/tls/src/connector.cc index 010a262eed8..07e27119434 100644 --- a/broker/tls/src/connector.cc +++ b/broker/tls/src/connector.cc @@ -1,26 +1,24 @@ /** -* Copyright 2009-2013 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2009-2013 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/tls/connector.hh" #include "com/centreon/broker/log_v2.hh" -#include "com/centreon/broker/tls/internal.hh" -#include "com/centreon/broker/tls/params.hh" #include "com/centreon/broker/tls/stream.hh" #include "com/centreon/exceptions/msg_fmt.hh" @@ -72,7 +70,7 @@ std::shared_ptr connector::open() { * @return Encrypted stream. */ std::shared_ptr connector::open(std::shared_ptr lower) { - std::shared_ptr u; + std::shared_ptr u; if (lower) { int ret; // Load parameters. @@ -82,61 +80,10 @@ std::shared_ptr connector::open(std::shared_ptr lower) { p.set_tls_hostname(_tls_hostname); p.load(); - gnutls_session_t* session(new gnutls_session_t); - try { - // Initialize the TLS session - log_v2::tls()->debug("TLS: initializing session"); -#ifdef GNUTLS_NONBLOCK - ret = gnutls_init(session, GNUTLS_CLIENT | GNUTLS_NONBLOCK); -#else - ret = gnutls_init(session, GNUTLS_CLIENT); -#endif // GNUTLS_NONBLOCK - if (ret != GNUTLS_E_SUCCESS) { - log_v2::tls()->error("TLS: cannot initialize session: {}", - gnutls_strerror(ret)); - throw msg_fmt("TLS: cannot initialize session: {} ", - gnutls_strerror(ret)); - } - - // Apply TLS parameters to the current session. - p.apply(*session); - - // Create stream object. - u.reset(new stream(session)); - } catch (...) 
{ - gnutls_deinit(*session); - delete session; - throw; - } + // Create stream object. + u = std::make_shared(GNUTLS_CLIENT); u->set_substream(lower); - - // Bind the TLS session with the stream from the lower layer. -#if GNUTLS_VERSION_NUMBER < 0x020C00 - gnutls_transport_set_lowat(*session, 0); -#endif // GNU TLS < 2.12.0 - gnutls_transport_set_pull_function(*session, pull_helper); - gnutls_transport_set_push_function(*session, push_helper); - gnutls_transport_set_ptr(*session, u.get()); - - // Perform the TLS handshake. - log_v2::tls()->debug("TLS: performing handshake"); - do { - ret = gnutls_handshake(*session); - } while (GNUTLS_E_AGAIN == ret || GNUTLS_E_INTERRUPTED == ret); - if (ret != GNUTLS_E_SUCCESS) { - log_v2::tls()->error("TLS: handshake failed: {}", gnutls_strerror(ret)); - throw msg_fmt("TLS: handshake failed: {}", gnutls_strerror(ret)); - } - - log_v2::tls()->debug("TLS: successful handshake"); - gnutls_protocol_t prot = gnutls_protocol_get_version(*session); - gnutls_cipher_algorithm_t ciph = gnutls_cipher_get(*session); - log_v2::tls()->debug("TLS: protocol and cipher {} {} used", - gnutls_protocol_get_name(prot), - gnutls_cipher_get_name(ciph)); - - // Check certificate if necessary. - p.validate_cert(*session); + u->init(p); } return u; diff --git a/broker/tls/src/internal.cc b/broker/tls/src/internal.cc index e570792d8c5..5fc09236db6 100644 --- a/broker/tls/src/internal.cc +++ b/broker/tls/src/internal.cc @@ -1,20 +1,20 @@ /** -* Copyright 2009-2013,2017 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ + * Copyright 2009-2013,2017 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include @@ -136,7 +136,12 @@ void tls::initialize() { * layer and give it to TLS for decoding. 
*/ ssize_t tls::pull_helper(gnutls_transport_ptr_t ptr, void* data, size_t size) { - return static_cast(ptr)->read_encrypted(data, size); + try { + return static_cast(ptr)->read_encrypted(data, size); + } catch (const std::exception& e) { + SPDLOG_LOGGER_DEBUG(log_v2::tls(), "read failed: {}", e.what()); + return -1; + } } /** @@ -146,5 +151,10 @@ ssize_t tls::pull_helper(gnutls_transport_ptr_t ptr, void* data, size_t size) { ssize_t tls::push_helper(gnutls_transport_ptr_t ptr, void const* data, size_t size) { - return static_cast(ptr)->write_encrypted(data, size); + try { + return static_cast(ptr)->write_encrypted(data, size); + } catch (const std::exception& e) { + SPDLOG_LOGGER_DEBUG(log_v2::tls(), "write failed: {}", e.what()); + return -1; + } } diff --git a/broker/tls/src/params.cc b/broker/tls/src/params.cc index 81baa3cd555..6d633964c32 100644 --- a/broker/tls/src/params.cc +++ b/broker/tls/src/params.cc @@ -50,7 +50,7 @@ params::~params() { * * @param[out] session Object on which parameters will be applied. */ -void params::apply(gnutls_session_t session) { +void params::apply(gnutls_session_t session)const { // Set the encryption method (normal ciphers with anonymous // Diffie-Hellman and optionnally compression). int ret; @@ -221,7 +221,7 @@ void params::set_trusted_ca(std::string const& ca_cert) { * * @param[in] session Session on which checks will be performed. */ -void params::validate_cert(gnutls_session_t session) { +void params::validate_cert(gnutls_session_t session) const { if (!_ca.empty()) { int ret; uint32_t status; diff --git a/broker/tls/src/stream.cc b/broker/tls/src/stream.cc index 46708fc5533..b0528e8a555 100644 --- a/broker/tls/src/stream.cc +++ b/broker/tls/src/stream.cc @@ -1,22 +1,20 @@ /** -* Copyright 2009-2017 Centreon -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -* -* For more information : contact@centreon.com -*/ - -#include "com/centreon/broker/tls/stream.hh" + * Copyright 2009-2017 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ #include @@ -25,6 +23,9 @@ #include "com/centreon/broker/log_v2.hh" #include "com/centreon/exceptions/msg_fmt.hh" +#include "com/centreon/broker/tls/internal.hh" +#include "com/centreon/broker/tls/stream.hh" + using namespace com::centreon::broker; using namespace com::centreon::broker::tls; using namespace com::centreon::exceptions; @@ -44,8 +45,56 @@ using namespace com::centreon::exceptions; * @param[in] sess TLS session, providing informations on the * encryption that should be used. */ -stream::stream(gnutls_session_t* sess) - : io::stream("TLS"), _deadline((time_t)-1), _session(sess) {} +stream::stream(unsigned int session_flags) + : io::stream("TLS"), _deadline((time_t)-1), _session(nullptr) { + SPDLOG_LOGGER_DEBUG(log_v2::tls(), "{:p} TLS: created", + static_cast(this)); + int ret = gnutls_init(&_session, session_flags); + if (ret != GNUTLS_E_SUCCESS) { + SPDLOG_LOGGER_ERROR(log_v2::tls(), "TLS: cannot initialize session: {}", + gnutls_strerror(ret)); + throw msg_fmt("TLS: cannot initialize session: {}", gnutls_strerror(ret)); + } +} + +/** + * @brief this method initializes the crypto layer of the stream + * + * @param param crypto params applied to the session and used to validate its cert + */ +void stream::init(const params& param) { + int ret; + param.apply(_session); + + // Bind the TLS session with the stream from the lower layer. +#if GNUTLS_VERSION_NUMBER < 0x020C00 + gnutls_transport_set_lowat(_session, 0); +#endif // GNU TLS < 2.12.0 + gnutls_transport_set_pull_function(_session, pull_helper); + gnutls_transport_set_push_function(_session, push_helper); + gnutls_transport_set_ptr(_session, this); + + // Perform the TLS handshake. + SPDLOG_LOGGER_DEBUG(log_v2::tls(), "{:p} TLS: performing handshake", + static_cast(this)); + do { + ret = gnutls_handshake(_session); + } while (GNUTLS_E_AGAIN == ret || GNUTLS_E_INTERRUPTED == ret); + if (ret != GNUTLS_E_SUCCESS) { + SPDLOG_LOGGER_ERROR(log_v2::tls(), "TLS: handshake failed: {}", + gnutls_strerror(ret)); + throw msg_fmt("TLS: handshake failed: {} ", gnutls_strerror(ret)); + } + SPDLOG_LOGGER_DEBUG(log_v2::tls(), "TLS: successful handshake"); + gnutls_protocol_t prot = gnutls_protocol_get_version(_session); + gnutls_cipher_algorithm_t ciph = gnutls_cipher_get(_session); + SPDLOG_LOGGER_DEBUG(log_v2::tls(), "TLS: protocol and cipher {} {} used", + gnutls_protocol_get_name(prot), + gnutls_cipher_get_name(ciph)); + + // Check certificate. + param.validate_cert(_session); +} /** * @brief Destructor. @@ -56,11 +105,11 @@ stream::stream(gnutls_session_t* sess) stream::~stream() { if (_session) { try { - _deadline = time(nullptr) + 30; // XXX : use connection timeout - gnutls_bye(*_session, GNUTLS_SHUT_RDWR); - gnutls_deinit(*_session); - delete (_session); - _session = nullptr; + SPDLOG_LOGGER_DEBUG(log_v2::tls(), "{:p} TLS: destroy session: {:p}", + static_cast(this), + static_cast(_session)); + gnutls_bye(_session, GNUTLS_SHUT_RDWR); + gnutls_deinit(_session); } // Ignore exception whatever the error might be. catch (...)
{ @@ -89,11 +138,11 @@ bool stream::read(std::shared_ptr& d, time_t deadline) { _deadline = deadline; std::shared_ptr buffer(new io::raw); buffer->resize(BUFSIZ); - int ret(gnutls_record_recv(*_session, buffer->data(), buffer->size())); + int ret(gnutls_record_recv(_session, buffer->data(), buffer->size())); if (ret < 0) { if ((ret != GNUTLS_E_INTERRUPTED) && (ret != GNUTLS_E_AGAIN)) { - log_v2::tls()->error("TLS: could not receive data: {}", - gnutls_strerror(ret)); + SPDLOG_LOGGER_ERROR(log_v2::tls(), "TLS: could not receive data: {}", + gnutls_strerror(ret)); throw msg_fmt("TLS: could not receive data: {} ", gnutls_strerror(ret)); } else return false; @@ -102,7 +151,7 @@ bool stream::read(std::shared_ptr& d, time_t deadline) { d = buffer; return true; } else { - log_v2::tls()->error("TLS session is terminated"); + SPDLOG_LOGGER_ERROR(log_v2::tls(), "TLS session is terminated"); throw msg_fmt("TLS session is terminated"); } return false; @@ -117,39 +166,45 @@ bool stream::read(std::shared_ptr& d, time_t deadline) { * @return Number of bytes actually read. */ long long stream::read_encrypted(void* buffer, long long size) { - // Read some data. - bool timed_out(false); - while (_buffer.empty()) { - std::shared_ptr d; - timed_out = !_substream->read(d, _deadline); - if (!timed_out && d && d->type() == io::raw::static_type()) { - io::raw* r(static_cast(d.get())); - _buffer.reserve(_buffer.size() + r->get_buffer().size()); - _buffer.insert(_buffer.end(), r->get_buffer().begin(), - r->get_buffer().end()); - //_buffer.append(r->data(), r->size()); - } else if (timed_out) - break; - } + try { + // Read some data. + bool timed_out(false); + while (_buffer.empty()) { + std::shared_ptr d; + timed_out = !_substream->read(d, _deadline); + if (!timed_out && d && d->type() == io::raw::static_type()) { + io::raw* r(static_cast(d.get())); + _buffer.reserve(_buffer.size() + r->get_buffer().size()); + _buffer.insert(_buffer.end(), r->get_buffer().begin(), + r->get_buffer().end()); + //_buffer.append(r->data(), r->size()); + } else if (timed_out) + break; + } - // Transfer data. - uint32_t rb(_buffer.size()); - if (!rb) { - if (timed_out) { - gnutls_transport_set_errno(*_session, EAGAIN); - return -1; + // Transfer data. 
+ uint32_t rb(_buffer.size()); + if (!rb) { + if (timed_out) { + gnutls_transport_set_errno(_session, EAGAIN); + return -1; + } else { + return 0; + } + } else if (size >= rb) { + memcpy(buffer, _buffer.data(), rb); + _buffer.clear(); + return rb; } else { - return 0; + memcpy(buffer, _buffer.data(), size); + _buffer.erase(_buffer.begin(), _buffer.begin() + size); + //_buffer.remove(0, size); + return size; } - } else if (size >= rb) { - memcpy(buffer, _buffer.data(), rb); - _buffer.clear(); - return rb; - } else { - memcpy(buffer, _buffer.data(), size); - _buffer.erase(_buffer.begin(), _buffer.begin() + size); - //_buffer.remove(0, size); - return size; + } catch (const std::exception& e) { + SPDLOG_LOGGER_DEBUG(log_v2::tls(), "tls read fail: {}", e.what()); + gnutls_transport_set_errno(_session, EPIPE); + throw; } } @@ -172,10 +227,10 @@ int stream::write(std::shared_ptr const& d) { char const* ptr(packet->const_data()); int size(packet->size()); while (size > 0) { - int ret(gnutls_record_send(*_session, ptr, size)); + int ret(gnutls_record_send(_session, ptr, size)); if (ret < 0) { - log_v2::tls()->error("TLS: could not send data: {}", - gnutls_strerror(ret)); + SPDLOG_LOGGER_ERROR(log_v2::tls(), "TLS: could not send data: {}", + gnutls_strerror(ret)); throw msg_fmt("TLS: could not send data: {}", gnutls_strerror(ret)); } ptr += ret; @@ -199,9 +254,15 @@ long long stream::write_encrypted(void const* buffer, long long size) { std::vector tmp(const_cast(static_cast(buffer)), const_cast(static_cast(buffer)) + static_cast(size)); - log_v2::tls()->trace("tls write enc: {}", size); + SPDLOG_LOGGER_TRACE(log_v2::tls(), "tls write enc: {}", size); r->get_buffer() = std::move(tmp); - _substream->write(r); - _substream->flush(); + try { + _substream->write(r); + _substream->flush(); + } catch (const std::exception& e) { + SPDLOG_LOGGER_DEBUG(log_v2::tls(), "tls write fail: {}", e.what()); + gnutls_transport_set_errno(_session, EPIPE); + throw; + } return size; } diff --git a/malloc-trace/CMakeLists.txt b/malloc-trace/CMakeLists.txt new file mode 100644 index 00000000000..fe1a38c6518 --- /dev/null +++ b/malloc-trace/CMakeLists.txt @@ -0,0 +1,42 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +# Global options. +project("Centreon malloc trace" C CXX) + +# Set directories. 
+set(INC_DIR "${PROJECT_SOURCE_DIR}/inc/com/centreon/malloc_trace") +set(SRC_DIR "src") + +add_library(centreon-malloc-trace SHARED + "src/by_thread_trace_active.cc" + "src/malloc_trace.cc" + "src/orphan_container.cc" + "src/simply_allocator.cc" +) + +target_link_libraries(centreon-malloc-trace + CONAN_PKG::fmt +) + +target_include_directories(centreon-malloc-trace PRIVATE + ${INC_DIR} + ${CMAKE_SOURCE_DIR}/common/inc +) + +target_precompile_headers(centreon-malloc-trace PRIVATE "precomp_inc/precomp.hh") diff --git a/malloc-trace/README.md b/malloc-trace/README.md new file mode 100644 index 00000000000..812be313767 --- /dev/null +++ b/malloc-trace/README.md @@ -0,0 +1,67 @@ +# malloc-trace + +## Description + +The goal of this little library is to trace each orphan malloc and free call. It overrides the weak **malloc**, **realloc**, **calloc** and **free** symbols. +We store every malloc and free call in an in-memory container. We remove a malloc from that container each time a free with the same address is called; otherwise the free is also stored in the container. +Every minute (by default), we flush the container content to disk: + * mallocs that have not been freed and that are older than one minute + * frees that have no corresponding malloc in the container. + +In order to use it, you have to set the LD_PRELOAD env variable +```bash +export LD_PRELOAD=malloc-trace.so +``` +Then you can launch your executable and each call will be recorded in /tmp/malloc-trace.csv with ';' as the field separator (a minimal test program is sketched further below). + +The columns are: +* function (malloc or free) +* thread id +* address in the process memory space +* size of memory allocated +* timestamp in ms +* call stack contained in a json array + ```json + [ + { + "f": "free", + "s": "", + "l": 0 + }, + { + "f": "__gnu_cxx::new_allocator, (__gnu_cxx::_Lock_policy)2> >::deallocate(std::_Sp_counted_ptr_inplace, (__gnu_cxx::_Lock_policy)2>*, unsigned long)", + "s": "", + "l": 0 + } + ] + ``` +f field is the function name +s field is the source file if available +l field is the source line + +This library works in the following manner: +We replace all malloc, realloc, calloc and free calls in order to trace them. +We store all mallocs in a container. Each time free is called, if the corresponding malloc is found, it is erased from the container, +otherwise the orphan free is stored in the container. +Every 60s (by default), we flush the container: all mallocs older than 60s and not freed are dumped to disk, and all orphan frees are also dumped. + +The output file may be moved while the program runs; in that case it is automatically recreated. + +## Environment variables +Some parameters of the library can be overridden with environment variables. | Environment variable | default value | description | | ------------------------ | --------------------- | ----------------------------------------------------------------------------- | | out_file_path | /tmp/malloc-trace.csv | path of the output file | | out_file_max_size | 0x100000000 | when the output file size exceeds this limit, the output file is truncated | | malloc_second_peremption | one minute | delay between two flushes and delay after which a malloc is considered orphaned | + +## Provided scripts + +### create_malloc_trace_table.sql +This script creates a table that can store an output file. +In this script, you will find in comments how to load the output csv file into that table. + +### remove_malloc_free.py +We store in the output file mallocs that aren't freed within the next minute; we also store orphan frees.
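To make the usage described in this README concrete, here is a minimal, hypothetical test program (not part of this change set) that produces one orphan malloc to trace; the build command, the library path and the 5-second peremption override are illustrative assumptions.

```cpp
// leak_demo.cc -- hypothetical example, not shipped with the library.
// Build (assumed):    g++ -O0 -g leak_demo.cc -o leak_demo
// Run (assumed path): LD_PRELOAD=./malloc-trace.so malloc_second_peremption=5 ./leak_demo
#include <chrono>
#include <cstdlib>
#include <thread>

int main() {
  // Never freed: after the peremption delay this allocation should be dumped
  // as an orphan malloc in the output csv (default /tmp/malloc-trace.csv).
  void* leaked = std::malloc(1024);
  (void)leaked;

  // Matched pair: erased from the in-memory container, so it should not
  // appear in the output file.
  void* matched = std::malloc(2048);
  std::free(matched);

  // Wait longer than the peremption delay, then allocate again so the
  // library gets a chance to flush its container to disk.
  std::this_thread::sleep_for(std::chrono::seconds(10));
  for (int i = 0; i < 100; ++i)
    std::free(std::malloc(16));
  return 0;
}
```

The resulting csv line for the leaked block can then be inspected directly or loaded into the database with the create_malloc_trace_table.sql script described above.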
+So if a malloc is dumped and it's corresponding free is operated two minutes later, the two are stored in output file. +The purpose of this script is to remove these malloc-free pairs. diff --git a/malloc-trace/create_malloc_trace_table.sql b/malloc-trace/create_malloc_trace_table.sql new file mode 100644 index 00000000000..c5bdda41c48 --- /dev/null +++ b/malloc-trace/create_malloc_trace_table.sql @@ -0,0 +1,32 @@ +-- Copyright 2024 Centreon +-- +-- Licensed under the Apache License, Version 2.0 (the "License"); +-- you may not use this file except in compliance with the License. +-- You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +-- For more information : contact@centreon.com +-- + + +-- this sql code lets you to create table where you can load data from /tmp/malloc-trace.csv +-- you have to connect to the bdd with this command line: +-- mysql -h 127.0.0.1 --local-infile=1 -D centreon_storage -u centreon -pcentreon +-- then you load data with +-- load data local infile '/tmp/malloc-trace.csv' into table malloc_trace fields terminated by ';'; + +CREATE TABLE `centreon_storage`.`malloc_trace` ( + `funct_name` VARCHAR(10) NOT NULL, + `thread_id` INT UNSIGNED NULL, + `address` BIGINT UNSIGNED NULL, + `size` INT UNSIGNED NULL, + `ms_timestamp` BIGINT UNSIGNED NULL, + `call_stack` TEXT(65535) NULL, + FULLTEXT INDEX `call_stack_ind` (`call_stack`) VISIBLE); diff --git a/malloc-trace/inc/com/centreon/malloc_trace/by_thread_trace_active.hh b/malloc-trace/inc/com/centreon/malloc_trace/by_thread_trace_active.hh new file mode 100644 index 00000000000..abadb5b6c7c --- /dev/null +++ b/malloc-trace/inc/com/centreon/malloc_trace/by_thread_trace_active.hh @@ -0,0 +1,99 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CMT_BY_THREAD_TRACE_ACTIVE_HH +#define CMT_BY_THREAD_TRACE_ACTIVE_HH + +#include "intrusive_map.hh" + +namespace com::centreon::malloc_trace { + +/** + * @brief This class is used to store the tracing state of a thread. + * The problem is: malloc is called, we explore the call stack and this exploration may + * do another malloc, so we risk an infinite loop. + * So the first malloc sets _malloc_trace_active and explores the call stack. + * The next malloc, called by the stacktrace process, tries to set _malloc_trace_active + * and, as it is already set, we don't try to explore the call stack. + * + */ +class thread_trace_active : public boost::intrusive::set_base_hook<> { + pid_t _thread_id; + mutable bool _malloc_trace_active = false; + + public: + thread_trace_active() {} + thread_trace_active(pid_t thread_id) : _thread_id(thread_id) {} + + pid_t get_thread_id() const { return _thread_id; } + + /** + * @brief Set _malloc_trace_active + * + * @return old value of _malloc_trace_active + */ + bool set_malloc_trace_active() const { + if (_malloc_trace_active) { + return true; + } + _malloc_trace_active = true; + return false; + } + + /** + * @brief reset _malloc_trace_active + * + * @return old value of _malloc_trace_active + */ + bool reset_malloc_trace_active() const { + if (!_malloc_trace_active) { + return false; + } + _malloc_trace_active = false; + return true; + } + + bool is_malloc_trace_active() const { return _malloc_trace_active; } + + struct key_extractor { + using type = pid_t; + type operator()(const thread_trace_active& node) const { + return node._thread_id; + } + }; +}; + +/** + * @brief container of thread_trace_active with zero allocation + * the drawback is that we are limited to storing 4096 thread trace states + * + */ +class thread_dump_active + : protected intrusive_map { + std::mutex _protect; + + public: + bool set_dump_active(pid_t thread_id); + void reset_dump_active(pid_t thread_id); +}; + +} // namespace com::centreon::malloc_trace + +#endif diff --git a/malloc-trace/inc/com/centreon/malloc_trace/funct_info_cache.hh b/malloc-trace/inc/com/centreon/malloc_trace/funct_info_cache.hh new file mode 100644 index 00000000000..77a5eef010a --- /dev/null +++ b/malloc-trace/inc/com/centreon/malloc_trace/funct_info_cache.hh @@ -0,0 +1,50 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + * For more information : contact@centreon.com + */ +#ifndef CMT_FUNCT_INFO_CACHE_HH +#define CMT_FUNCT_INFO_CACHE_HH + +namespace com::centreon::malloc_trace { +/** + * @brief symbol information research is very expensive + * so we store function informations in a cache + * + */ +class funct_info { + const std::string _funct_name; + const std::string _source_file; + const size_t _source_line; + + public: + funct_info(std::string&& funct_name, + std::string&& source_file, + size_t source_line) + : _funct_name(funct_name), + _source_file(source_file), + _source_line(source_line) {} + + const std::string& get_funct_name() const { return _funct_name; } + const std::string& get_source_file() const { return _source_file; } + size_t get_source_line() const { return _source_line; } +}; + +using funct_cache_map = + std::map; + +} // namespace com::centreon::malloc_trace + +#endif diff --git a/malloc-trace/inc/com/centreon/malloc_trace/intrusive_map.hh b/malloc-trace/inc/com/centreon/malloc_trace/intrusive_map.hh new file mode 100644 index 00000000000..298c5be90bf --- /dev/null +++ b/malloc-trace/inc/com/centreon/malloc_trace/intrusive_map.hh @@ -0,0 +1,81 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ +#ifndef CMT_INTRUSIVE_MAP_HH +#define CMT_INTRUSIVE_MAP_HH + +namespace com::centreon::malloc_trace { + +/** + * @brief The goal of this class is to provide map without allocation + * + * @tparam node_type node (key and data) that must inherit from + * boost::intrusive::set_base_hook<> + * @tparam key_extractor struct with an operator that extract key from node_type + * @tparam node_arrray_size size max of the container + */ +template +class intrusive_map { + public: + using key_type = typename key_extractor::type; + + private: + node_type _nodes_array[node_array_size]; + node_type* _free_node = _nodes_array; + const node_type* _array_end = _free_node + node_array_size; + + using node_map = + boost::intrusive::set >; + + node_map _nodes; + + public: + ~intrusive_map() { _nodes.clear(); } + + const node_type* find(const key_type& key) const { + auto found = _nodes.find(key); + if (found == _nodes.end()) { + return nullptr; + } else { + return &*found; + } + } + + const node_type* insert_and_get(const key_type& key) { + if (_free_node >= _array_end) { + return nullptr; + } + + node_type* to_insert = _free_node++; + new (to_insert) node_type(key); + _nodes.insert(*to_insert); + return to_insert; + } + + /** + * @brief sometimes method are called before object construction + * + * @return true constructor has been called + * @return false + */ + bool is_initialized() const { return _free_node; } +}; + +} // namespace com::centreon::malloc_trace + +#endif diff --git a/malloc-trace/inc/com/centreon/malloc_trace/orphan_container.hh b/malloc-trace/inc/com/centreon/malloc_trace/orphan_container.hh new file mode 100644 index 00000000000..061c327d35c --- /dev/null +++ 
b/malloc-trace/inc/com/centreon/malloc_trace/orphan_container.hh @@ -0,0 +1,211 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CMT_ORPHAN_CONTAINER_HH +#define CMT_ORPHAN_CONTAINER_HH + +#include + +#include "funct_info_cache.hh" + +namespace com::centreon::malloc_trace { + +constexpr size_t max_backtrace_size = 15; + +/** + * @brief information of free or malloc with stacktrace + * In this bean, we store: + * - allocated address + * - size of memory allocated (0 if free) + * - thread id + * - function name: malloc, free, realloc or freerealloc + * - length of the backtrace + * - backtrace + * - timestamp + */ +class backtrace_info { + const void* _allocated; + const size_t _allocated_size; + const pid_t _thread_id; + const std::string_view _funct_name; + size_t _backtrace_size; + boost::stacktrace::frame::native_frame_ptr_t _backtrace[max_backtrace_size]; + const std::chrono::system_clock::time_point _last_allocated; + + public: + backtrace_info(const void* allocated, + size_t allocated_size, + pid_t thread_id, + const std::string_view& funct_name, + const boost::stacktrace::stacktrace& backtrace, + size_t backtrace_offset); + + const void* get_allocated() const { return _allocated; } + size_t get_allocated_size() const { return _allocated_size; } + size_t get_backtrace_size() const { return _backtrace_size; } + const boost::stacktrace::frame::native_frame_ptr_t* get_backtrace() const { + return _backtrace; + } + const std::chrono::system_clock::time_point& get_last_allocated() const { + return _last_allocated; + } + + void to_file(int fd, funct_cache_map& funct_info_cache) const; +}; + +/** + * @brief infos of malloc + * as this object is stored in 2 containers, it has to set hooks: + * set_base_hook and allocated_time_hook + * + */ +class orphan_malloc : public backtrace_info, + public boost::intrusive::set_base_hook<> { + public: + boost::intrusive::set_member_hook<> allocated_time_hook; + + orphan_malloc(const void* allocated, + size_t allocated_size, + pid_t thread_id, + const std::string_view& funct_name, + const boost::stacktrace::stacktrace& backtrace, + size_t backtrace_offset) + : backtrace_info(allocated, + allocated_size, + thread_id, + funct_name, + backtrace, + backtrace_offset) {} + + // key extractor used to create a map addr to orphan_malloc + struct address_extractor { + using type = const void*; + type operator()(const orphan_malloc& node) const { + return node.get_allocated(); + } + }; + + // key extractor used to create a map time_alloc to orphan_malloc + struct time_allocated_extractor { + using type = std::chrono::system_clock::time_point; + const type& operator()(const orphan_malloc& node) const { + return node.get_last_allocated(); + } + }; +}; + +/** + * @brief infos of a free + * this object is stored in single link list: slist_base_hook + * + */ +class orphan_free : public backtrace_info, + public boost::intrusive::slist_base_hook<> { + public: + 
orphan_free(const void* allocated, + pid_t thread_id, + const std::string_view& funct_name, + const boost::stacktrace::stacktrace& backtrace, + size_t backtrace_offset) + : backtrace_info(allocated, + 0, + thread_id, + funct_name, + backtrace, + backtrace_offset) {} +}; + +/** + * @brief this object contains all orphan mallocs (malloc without free) and all + * orphan frees In order to limit allocation and improve performance, all + * objects are stored in intrusive container and allocated in simple node + * allocator (allocator that doesn't allow to allocate multiple object at once) + * + */ +class orphan_container { + // malloc part + // map alloc adress => orphan_malloc + using orphan_malloc_address_set = boost::intrusive::set< + orphan_malloc, + boost::intrusive::key_of_value>; + + // map time alloc => orphan_malloc + using orphan_malloc_time_hook = + boost::intrusive::member_hook, + &orphan_malloc::allocated_time_hook>; + using orphan_malloc_time_set = boost::intrusive::multiset< + orphan_malloc, + orphan_malloc_time_hook, + boost::intrusive::key_of_value>; + + // node allocator used to create orphan_malloc + using orphan_malloc_allocator = com::centreon::common:: + node_allocator, 0x100000>; + + orphan_malloc_address_set _address_to_malloc; + orphan_malloc_time_set _time_to_malloc; + orphan_malloc_allocator _malloc_allocator; + + // free part + // orphan_free are stored in single linked list + using orphan_free_list = + boost::intrusive::slist>; + + // node allocator used to create orphan_free + using orphan_free_allocator = com::centreon::common:: + node_allocator, 0x100000>; + + orphan_free_list _free; + orphan_free_allocator _free_allocator; + + funct_cache_map _funct_info_cache; + + std::chrono::system_clock::duration _malloc_peremption; + std::chrono::system_clock::time_point _last_flush; + size_t _max_file_size; + std::string_view _out_file_path; + + mutable std::mutex _protect; + + int open_file(); + + public: + orphan_container(); + + void add_malloc(const void* addr, + size_t allocated_size, + pid_t thread_id, + const std::string_view& funct_name, + const boost::stacktrace::stacktrace& backtrace, + size_t backtrace_offset); + + bool free(const void* addr); + + void add_free(const void* addr, + pid_t thread_id, + const std::string_view& funct_name, + const boost::stacktrace::stacktrace& backtrace, + size_t backtrace_offset); + + void flush_to_file(); +}; + +} // namespace com::centreon::malloc_trace + +#endif diff --git a/malloc-trace/inc/com/centreon/malloc_trace/simply_allocator.hh b/malloc-trace/inc/com/centreon/malloc_trace/simply_allocator.hh new file mode 100644 index 00000000000..23daa26b958 --- /dev/null +++ b/malloc-trace/inc/com/centreon/malloc_trace/simply_allocator.hh @@ -0,0 +1,59 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CMT_SIMPLY_ALLOCATOR_HH +#define CMT_SIMPLY_ALLOCATOR_HH + +namespace com::centreon::malloc_trace { + +constexpr unsigned block_size = 4096; +constexpr unsigned nb_block = 256; +/** + * @brief basic allocator + * At the beginning, we don't know original malloc + * we must provide a simple malloc free for dlsym + * + */ +class simply_allocator { + class node_block { + unsigned char _buff[block_size]; + bool _free = true; + + public: + struct key_extractor { + using type = unsigned char const*; + type operator()(const node_block& block) const { return block._buff; } + }; + + bool is_free() const { return _free; } + void set_free(bool free) { _free = free; } + unsigned char* get_buff() { return _buff; } + }; + + node_block _blocks[nb_block]; + std::mutex _protect; + + public: + void* malloc(size_t size); + void* realloc(void* p, size_t size); + bool free(void* p); +}; + +} // namespace com::centreon::malloc_trace + +#endif diff --git a/malloc-trace/precomp_inc/precomp.hh b/malloc-trace/precomp_inc/precomp.hh new file mode 100644 index 00000000000..48f8663ed1a --- /dev/null +++ b/malloc-trace/precomp_inc/precomp.hh @@ -0,0 +1,34 @@ +/* +** Copyright 2024 Centreon +** +** This file is part of Centreon Engine. +** +** Centreon Engine is free software: you can redistribute it and/or +** modify it under the terms of the GNU General Public License version 2 +** as published by the Free Software Foundation. +** +** Centreon Engine is distributed in the hope that it will be useful, +** but WITHOUT ANY WARRANTY; without even the implied warranty of +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +** General Public License for more details. +** +** You should have received a copy of the GNU General Public License +** along with Centreon Engine. If not, see +** . +*/ + +#ifndef CMT_PRECOMP_HH +#define CMT_PRECOMP_HH + +#include +#include + +#include +#include +#include + +#include +#include +#include + +#endif diff --git a/malloc-trace/remove_malloc_free.py b/malloc-trace/remove_malloc_free.py new file mode 100755 index 00000000000..551aa4651e0 --- /dev/null +++ b/malloc-trace/remove_malloc_free.py @@ -0,0 +1,62 @@ +#!/usr/bin/python3 +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# For more information : contact@centreon.com +# +# This script takes a file produced by centreon-malloc-trace library and remove all malloc free pairs + +import sys +import getopt +import csv + + + + +def main(argv): + csv.field_size_limit(sys.maxsize) + inputfile = '' + outputfile = '' + opts, args = getopt.getopt(argv,"hi:o:",["in_file=","out_file="]) + for opt, arg in opts: + if opt == '-h': + print ('remove_malloc_free.py -i -o ') + return + elif opt in ("-i", "--in_file"): + inputfile = arg + elif opt in ("-o", "--out_file"): + outputfile = arg + + if inputfile == '' or outputfile == '': + print ('remove_malloc_free.py -i -o ') + return + allocated = {} + with open(inputfile) as csv_file: + csv_reader = csv.reader(csv_file, delimiter=';') + for row in csv_reader: + if len(row) > 2 and row[2].isdigit(): + if row[0].find('free') >= 0: + if (row[2] in allocated): + allocated.pop(row[2]) + else: + allocated[row[2]] = row + with open(outputfile, 'w') as f: + for row in allocated.values(): + f.write(';'.join(row) ) + f.write('\n') + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/malloc-trace/src/by_thread_trace_active.cc b/malloc-trace/src/by_thread_trace_active.cc new file mode 100644 index 00000000000..91fbc934f8a --- /dev/null +++ b/malloc-trace/src/by_thread_trace_active.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "by_thread_trace_active.hh" + +using namespace com::centreon::malloc_trace; + +/** + * @brief Set the flag to true in _by_thread_dump_active + * + * @return true the flag was not setted before call + * @return false the flag was yet setted before call + */ +bool thread_dump_active::set_dump_active(pid_t thread_id) { + std::lock_guard l(_protect); + if (!is_initialized()) { + return false; + } + const thread_trace_active* exist = find(thread_id); + + if (!exist) { + const thread_trace_active* inserted = insert_and_get(thread_id); + if (!inserted) { + return false; + } + inserted->set_malloc_trace_active(); + return true; + } else { + return !exist->set_malloc_trace_active(); + } +} + +void thread_dump_active::reset_dump_active(pid_t thread_id) { + std::lock_guard l(_protect); + if (!is_initialized()) { + return; + } + const thread_trace_active* exist = find(thread_id); + if (exist) { + exist->reset_malloc_trace_active(); + } +} diff --git a/malloc-trace/src/malloc_trace.cc b/malloc-trace/src/malloc_trace.cc new file mode 100644 index 00000000000..38bf8ea2463 --- /dev/null +++ b/malloc-trace/src/malloc_trace.cc @@ -0,0 +1,242 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include +#include + +#include "by_thread_trace_active.hh" +#include "funct_info_cache.hh" +#include "orphan_container.hh" +#include "simply_allocator.hh" + +using namespace com::centreon::malloc_trace; + + +extern void* __libc_malloc(size_t size); + +pid_t gettid() __attribute__((weak)); + +/** + * @brief gettid is not available on alma8 + * + * @return pid_t + */ +pid_t m_gettid() { + if (gettid) { + return gettid(); + } else { + return syscall(__NR_gettid); + } +} + +/** + * @brief when we enter malloc or free, we store stack trace information; + * this generates allocations that we don't want to store. This object allows + * us to mark tracing as active on the current thread in order to avoid + * recursive tracing. + * + */ +static thread_dump_active _thread_dump_active; + +/** + * @brief simply_allocator used by dlsym + * + */ +static simply_allocator _first_malloc; + +/** + * @brief the container that stores every malloc and free + * + */ +static orphan_container* _orphans = new orphan_container; + +static void* first_malloc(size_t size) { + return _first_malloc.malloc(size); +} + +static void* first_realloc(void* p, size_t size) { + return _first_malloc.realloc(p, size); +} + +typedef void* (*malloc_signature)(size_t); + +// will be filled with the original malloc +static malloc_signature original_malloc = first_malloc; + +typedef void* (*realloc_signature)(void*, size_t); + +// will be filled with the original realloc +static realloc_signature original_realloc = first_realloc; + +static void first_free(void* p) { + _first_malloc.free(p); +} + +typedef void (*free_signature)(void*); +// will be filled with the original free +static free_signature original_free = first_free; + +/** + * @brief there are 3 stages: + * on the first alloc, we don't know the malloc, realloc and free addresses, + * so we call dlsym to get these addresses. + * As dlsym allocates memory, we are in the dlsym_running state and we provide + * an allocation mechanism through the simply_allocator.
+ * Once dlsym are done, we are in hook state + * + */ +enum class e_library_state { not_hooked, dlsym_running, hooked }; +static e_library_state _state = e_library_state::not_hooked; + +static void search_symbols() { + original_malloc = + reinterpret_cast(dlsym(RTLD_NEXT, "malloc")); + original_free = reinterpret_cast(dlsym(RTLD_NEXT, "free")); + original_realloc = + reinterpret_cast(dlsym(RTLD_NEXT, "realloc")); +} + +/** + * @brief our malloc + * + * @param size + * @param funct_name function name logged + * @return void* + */ +static void* malloc(size_t size, const char* funct_name) { + switch (_state) { + case e_library_state::not_hooked: + _state = e_library_state::dlsym_running; + search_symbols(); + _state = e_library_state::hooked; + break; + case e_library_state::dlsym_running: + return first_malloc(size); + default: + break; + } + + void* p = original_malloc(size); + + pid_t thread_id = m_gettid(); + bool have_to_dump = _thread_dump_active.set_dump_active(thread_id); + + // if this thread is not yet dumping => store it + if (have_to_dump) { + if (_orphans) { + _orphans->add_malloc(p, size, thread_id, funct_name, + boost::stacktrace::stacktrace(), 2); + _orphans->flush_to_file(); + } + _thread_dump_active.reset_dump_active(thread_id); + } + return p; +} + +/** + * @brief our realloc function + * + * @param p + * @param size + * @return void* + */ +void* realloc(void* p, size_t size) { + switch (_state) { + case e_library_state::not_hooked: + _state = e_library_state::dlsym_running; + search_symbols(); + _state = e_library_state::hooked; + break; + case e_library_state::dlsym_running: + return first_realloc(p, size); + default: + break; + } + void* new_p = original_realloc(p, size); + pid_t thread_id = m_gettid(); + bool have_to_dump = _thread_dump_active.set_dump_active(thread_id); + // if this thread is not yet dumping => call dump_callstack + if (have_to_dump) { + constexpr std::string_view realloc_funct_name("realloc"); + // if pointer has changed, we record a free + if (new_p != p && p) { + if (!_orphans->free(p)) { + constexpr std::string_view free_funct_name("freerealloc"); + _orphans->add_free(p, thread_id, free_funct_name, + boost::stacktrace::stacktrace(), 2); + } + } + _orphans->add_malloc(new_p, size, thread_id, realloc_funct_name, + boost::stacktrace::stacktrace(), 2); + _orphans->flush_to_file(); + _thread_dump_active.reset_dump_active(thread_id); + } + return new_p; +} + +/** + * @brief replacement of the original malloc + * + * @param size + * @return void* + */ +void* malloc(size_t size) { + return malloc(size, "malloc"); +} + +/** + * @brief our calloc function + * call to malloc + * + * @param num + * @param size + * @return void* + */ +void* calloc(size_t num, size_t size) { + size_t total_size = num * size; + void* p = malloc(total_size, "calloc"); + memset(p, 0, total_size); + return p; +} + +/** + * @brief our free + * + * @param p + */ +void free(void* p) { + if (_first_malloc.free(p)) + return; + original_free(p); + if (!p) + return; + + pid_t thread_id = m_gettid(); + bool have_to_dump = _thread_dump_active.set_dump_active(thread_id); + + // if this thread is not yet dumping => call dump_callstack + if (have_to_dump) { + if (!_orphans->free(p)) { + constexpr std::string_view free_funct_name("free"); + _orphans->add_free(p, thread_id, free_funct_name, + boost::stacktrace::stacktrace(), 2); + _orphans->flush_to_file(); + } + _thread_dump_active.reset_dump_active(thread_id); + } +} diff --git a/malloc-trace/src/orphan_container.cc 
b/malloc-trace/src/orphan_container.cc new file mode 100644 index 00000000000..091a43eccda --- /dev/null +++ b/malloc-trace/src/orphan_container.cc @@ -0,0 +1,304 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include +#include + +#include "orphan_container.hh" + +using namespace com::centreon::malloc_trace; + +/** + * @brief Construct a new backtrace info::backtrace info object + * + * @param allocated address allocated or freed + * @param allocated_size size allocated + * @param thread_id + * @param funct_name malloc, free or freerealloc + * @param backtrace + * @param backtrace_offset we will ignore backtrace_offset first frames + */ +backtrace_info::backtrace_info(const void* allocated, + size_t allocated_size, + pid_t thread_id, + const std::string_view& funct_name, + const boost::stacktrace::stacktrace& backtrace, + size_t backtrace_offset) + : _allocated(allocated), + _allocated_size(allocated_size), + _thread_id(thread_id), + _funct_name(funct_name), + _backtrace_size(0), + _last_allocated(std::chrono::system_clock::now()) { + if (backtrace_offset >= backtrace.size()) { + return; + } + + boost::stacktrace::stacktrace::const_iterator frame_iter = backtrace.begin(); + std::advance(frame_iter, backtrace_offset); + for (; frame_iter != backtrace.end() && _backtrace_size < max_backtrace_size; + ++frame_iter) { + _backtrace[_backtrace_size++] = frame_iter->address(); + } +} + +/** + * @brief add a line in output file + * + * @param fd file descriptor + * @param funct_info_cache map address=>function info where we store infos of + * functions (name, source line) + */ +void backtrace_info::to_file(int fd, funct_cache_map& funct_info_cache) const { + if (fd <= 0) { + return; + } + constexpr unsigned size_buff = 0x40000; + char buff[size_buff]; + char* end_buff = buff + size_buff - 10; + *end_buff = 0; + + char* work_pos = + fmt::format_to_n(buff, size_buff, "\"{}\";{};{};{};{};\"[", _funct_name, + _thread_id, reinterpret_cast(_allocated), + _allocated_size, + std::chrono::duration_cast( + _last_allocated.time_since_epoch()) + .count()) + .out; + + for (unsigned stack_cpt = 0; stack_cpt < _backtrace_size; ++stack_cpt) { + const boost::stacktrace::frame::native_frame_ptr_t addr = + _backtrace[stack_cpt]; + boost::stacktrace::frame frame(addr); + if (end_buff - work_pos < 1000) { + break; + } + + if (stack_cpt) { + *work_pos++ = ','; + } + funct_cache_map::const_iterator cache_entry = funct_info_cache.find(addr); + if (cache_entry == funct_info_cache.end()) { // not found => search and + // save + funct_info to_insert(frame.name(), frame.source_file(), + frame.source_line()); + cache_entry = funct_info_cache.emplace(addr, to_insert).first; + } + + if (cache_entry->second.get_source_file().empty()) { + work_pos = fmt::format_to_n(work_pos, end_buff - work_pos, + "{{\\\"f\\\":\\\"{}\\\" }}", + cache_entry->second.get_funct_name()) + .out; + } else { + work_pos = + fmt::format_to_n( + 
work_pos, end_buff - work_pos, + "{{\\\"f\\\":\\\"{}\\\" , \\\"s\\\":\\\"{}\\\" , \\\"l\\\":{}}}", + cache_entry->second.get_funct_name(), + cache_entry->second.get_source_file(), + cache_entry->second.get_source_line()) + .out; + } + } + + *work_pos++ = ']'; + *work_pos++ = '"'; + *work_pos++ = '\n'; + + ::write(fd, buff, work_pos - buff); +} + +/********************************************************************************* + orphan_container +*********************************************************************************/ + +/** + * @brief Construct a new orphan container::orphan container object + * + */ +orphan_container::orphan_container() + : _malloc_allocator(std::allocator()), + _free_allocator(std::allocator()) { + char* env_out_file_max_size = getenv("out_file_max_size"); + if (env_out_file_max_size && atoll(env_out_file_max_size) > 0) + _max_file_size = atoll(env_out_file_max_size); + else + _max_file_size = 0x100000000; + + char* env_out_file_path = getenv("out_file_path"); + if (env_out_file_path && strlen(env_out_file_path) > 0) + _out_file_path = env_out_file_path; + + else + _out_file_path = "/tmp/malloc-trace.csv"; + + char* malloc_second_peremption = getenv("malloc_second_peremption"); + if (malloc_second_peremption && atoi(malloc_second_peremption) > 0) + _malloc_peremption = std::chrono::seconds(atoi(malloc_second_peremption)); + else + _malloc_peremption = std::chrono::minutes(1); +} + +/** + * @brief register a malloc action, it can be a malloc or a realloc + * + * @param addr address allocated + * @param allocated_size size allocated + * @param thread_id + * @param funct_name + * @param backtrace + * @param backtrace_offset we will ignore backtrace_offset first frames + */ +void orphan_container::add_malloc( + const void* addr, + size_t allocated_size, + pid_t thread_id, + const std::string_view& funct_name, + const boost::stacktrace::stacktrace& backtrace, + size_t backtrace_offset) { + std::lock_guard l(_protect); + orphan_malloc* new_node = _malloc_allocator.allocate(); + new (new_node) orphan_malloc(addr, allocated_size, thread_id, funct_name, + backtrace, backtrace_offset); + + if (!_address_to_malloc.insert(*new_node).second) { + _malloc_allocator.deallocate(new_node); + } else { + _time_to_malloc.insert(*new_node); + } +} + +/** + * @brief when program call free we try to unregister previous malloc at addr + * + * @param addr address to free + * @return true malloc was found and unregistered + * @return false no malloc found for this address + */ +bool orphan_container::free(const void* addr) { + std::lock_guard l(_protect); + auto found = _address_to_malloc.find(addr); + if (found != _address_to_malloc.end()) { + orphan_malloc& to_erase = *found; + + //_time_to_malloc is only indexed by alloc timestamp (ms) + auto where_to_search = + _time_to_malloc.equal_range(to_erase.get_last_allocated()); + for (; where_to_search.first != where_to_search.second; + ++where_to_search.first) { + if (&*where_to_search.first == &to_erase) { + _time_to_malloc.erase(where_to_search.first); + break; + } + } + + _address_to_malloc.erase(found); + _malloc_allocator.deallocate(&to_erase); + return true; + } else { + return false; + } +} + +/** + * @brief in case or free has returned false, we have to add free orphan in this + * container + * + * @param addr address freed + * @param thread_id + * @param funct_name + * @param backtrace + * @param backtrace_offset we will ignore backtrace_offset first frames + */ +void orphan_container::add_free(const void* addr, + pid_t 
thread_id,
+                                const std::string_view& funct_name,
+                                const boost::stacktrace::stacktrace& backtrace,
+                                size_t backtrace_offset) {
+  std::lock_guard l(_protect);
+  orphan_free* new_free = _free_allocator.allocate();
+  new (new_free)
+      orphan_free(addr, thread_id, funct_name, backtrace, backtrace_offset);
+  _free.push_back(*new_free);
+}
+
+/**
+ * @brief flush contents to disk
+ * Only mallocs older than _malloc_peremption are flushed; more recent ones
+ * are kept because a matching free may still unregister them. All orphan
+ * frees are flushed. Every flushed entry is removed from its container.
+ * The flush runs at most once every _malloc_peremption.
+ *
+ */
+void orphan_container::flush_to_file() {
+  std::lock_guard l(_protect);
+  std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
+  if (_last_flush + _malloc_peremption > now) {
+    return;
+  }
+
+  int fd = open_file();
+
+  _last_flush = now;
+
+  // flush the oldest mallocs to disk and remove them from both containers
+  if (!_time_to_malloc.empty()) {
+    orphan_malloc_time_set::iterator upper = _time_to_malloc.upper_bound(
+        std::chrono::system_clock::now() - _malloc_peremption);
+    for (orphan_malloc_time_set::iterator to_flush = _time_to_malloc.begin();
+         to_flush != upper; ++to_flush) {
+      to_flush->to_file(fd, _funct_info_cache);
+      _address_to_malloc.erase(to_flush->get_allocated());
+    }
+    _time_to_malloc.erase_and_dispose(
+        _time_to_malloc.begin(), upper, [this](orphan_malloc* to_dispose) {
+          _malloc_allocator.deallocate(to_dispose);
+        });
+  }
+
+  // flush all orphan frees
+  for (const orphan_free& to_flush : _free) {
+    to_flush.to_file(fd, _funct_info_cache);
+  }
+  _free.clear_and_dispose([this](orphan_free* to_dispose) {
+    _free_allocator.deallocate(to_dispose);
+  });
+  ::close(fd);
+}
+
+/**
+ * @brief open the output file, creating it if needed
+ * if the file size exceeds _max_file_size, the file is truncated
+ *
+ * @return int file descriptor
+ */
+int orphan_container::open_file() {
+  int out_file_fd = open(_out_file_path.data(), O_APPEND | O_CREAT | O_RDWR,
+                         S_IRUSR | S_IWUSR);
+  if (out_file_fd > 0) {
+    struct stat file_stat;
+    if (!fstat(out_file_fd, &file_stat)) {
+      if (file_stat.st_size > _max_file_size) {
+        ftruncate(out_file_fd, 0);
+      }
+    }
+  }
+  return out_file_fd;
+}
diff --git a/malloc-trace/src/simply_allocator.cc b/malloc-trace/src/simply_allocator.cc
new file mode 100644
index 00000000000..84b6e4c7841
--- /dev/null
+++ b/malloc-trace/src/simply_allocator.cc
@@ -0,0 +1,78 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + * For more information : contact@centreon.com + */ + +#include "simply_allocator.hh" + +using namespace com::centreon::malloc_trace; + +/** + * @brief same as malloc + * + * @param size + * @return void* + */ +void* simply_allocator::malloc(size_t size) { + if (size > block_size) { + return nullptr; + } + std::lock_guard l(_protect); + for (node_block* search = _blocks; search != _blocks + nb_block; ++search) { + if (search->is_free()) { + search->set_free(false); + return search->get_buff(); + } + } + return nullptr; +} + +/** + * @brief reallocate a pointer, + * if size > block_size or p doesn't belong to simply_allocator, it returns + * nullptr + * + * @param p + * @param size + * @return void* + */ +void* simply_allocator::realloc(void* p, size_t size) { + if (p < _blocks || p >= _blocks + block_size) + return nullptr; + if (size > block_size) { + return nullptr; + } + return p; +} + +/** + * @brief same as free + * + * @param p + * @return true if the pointer belong to this allocator + */ +bool simply_allocator::free(void* p) { + if (p < _blocks || p >= _blocks + block_size) + return false; + std::lock_guard l(_protect); + for (node_block* search = _blocks; search != _blocks + nb_block; ++search) { + if (search->get_buff() == p) { + search->set_free(true); + return true; + } + } + return false; +} diff --git a/packaging/centreon-broker-core.yaml b/packaging/centreon-broker-core.yaml index f505fb4dd55..998732bbd50 100644 --- a/packaging/centreon-broker-core.yaml +++ b/packaging/centreon-broker-core.yaml @@ -54,10 +54,10 @@ overrides: - centreon-broker-core-devel deb: depends: - - lua5.3 + - ${LUA_VERSION} - centreon-clib (= ${VERSION}-${RELEASE}${DIST}) - centreon-broker (= ${VERSION}-${RELEASE}${DIST}) - - libgnutls30 + - ${LIB_GNU_TLS_VERSION} conflicts: - centreon-broker-storage - centreon-broker-core-dev diff --git a/packaging/centreon-collect.yaml b/packaging/centreon-collect.yaml index 6da569cfe3a..e1ab618aac6 100644 --- a/packaging/centreon-collect.yaml +++ b/packaging/centreon-collect.yaml @@ -30,12 +30,8 @@ contents: owner: centreon-engine group: centreon-engine - - src: "./files/empty_file" - dst: "/var/log/centreon-engine/retention.dat" - file_info: - mode: 0755 - owner: centreon-engine - group: centreon-engine + - dst: "/var/log/centreon-engine/retention.dat" + type: ghost - src: "./files/empty_file" dst: "/var/log/centreon-engine/status.dat" diff --git a/packaging/scripts/env/.env.bookworm b/packaging/scripts/env/.env.bookworm new file mode 100644 index 00000000000..538e115d971 --- /dev/null +++ b/packaging/scripts/env/.env.bookworm @@ -0,0 +1,2 @@ +LIB_GNU_TLS_VERSION=libgnutls30 +LUA_VERSION=lua5.3 diff --git a/packaging/scripts/env/.env.bullseye b/packaging/scripts/env/.env.bullseye new file mode 100644 index 00000000000..538e115d971 --- /dev/null +++ b/packaging/scripts/env/.env.bullseye @@ -0,0 +1,2 @@ +LIB_GNU_TLS_VERSION=libgnutls30 +LUA_VERSION=lua5.3 diff --git a/tests/broker-engine/rrd.robot b/tests/broker-engine/rrd.robot index 87045b922c4..a033f584692 100644 --- a/tests/broker-engine/rrd.robot +++ b/tests/broker-engine/rrd.robot @@ -384,6 +384,9 @@ BRRDRMU1 Should Be True ... ${result} ... Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. 
+        # 48 = 0o060, i.e. the group read/write bits
+        ${result}    Has File Permissions    ${VarRoot}/lib/centreon/metrics/${m}.rrd    48
+        Should Be True    ${result}    ${VarRoot}/lib/centreon/metrics/${m}.rrd does not have group RW permissions
     END
     FOR    ${index_id}    IN    @{index}
@@ -429,6 +432,7 @@ RRD1
     ${result}    Find In Log With Timeout    ${rrdLog}    ${start}    ${content1}    45
     Should Not Be True    ${result}    Database did not receive command to rebuild metrics
+
 *** Keywords ***
 Test Clean
     Stop Engine
diff --git a/tests/resources/Common.py b/tests/resources/Common.py
index 1903c3b6914..7edb6b1fed7 100644
--- a/tests/resources/Common.py
+++ b/tests/resources/Common.py
@@ -1484,3 +1484,18 @@ def get_uid():
 
 def set_uid(user_id: int):
     os.setuid(user_id)
+
+
+def has_file_permissions(path: str, permission: int):
+    """! Test whether a file has the permission bits passed in parameter.
+    The file mode is ANDed with the permission mask.
+    @param path path of the file
+    @param permission permission mask to test against the file mode
+    @return True if the file has all the requested permission bits
+    """
+    try:
+        stat_res = os.stat(path)
+    except OSError:
+        logger.console(f"failed to get permissions of {path}")
+        return False
+    return (stat_res.st_mode & permission) == permission
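Note: as a minimal sketch of how the new Has File Permissions keyword (backed by has_file_permissions in tests/resources/Common.py) is expected to be used from plain Python, the snippet below builds the 48 mask from the group read/write bits instead of hard-coding the decimal value. The RRD path and the standalone import of Common are hypothetical, for illustration only.

import os
import stat

from Common import has_file_permissions  # assumes tests/resources is on PYTHONPATH

# group read/write bits: stat.S_IRGRP | stat.S_IWGRP == 0o060 == 48,
# the same mask passed by the BRRDRMU1 test above
GROUP_RW = stat.S_IRGRP | stat.S_IWGRP

rrd_file = "/var/lib/centreon/metrics/1.rrd"  # hypothetical path
if has_file_permissions(rrd_file, GROUP_RW):
    print(f"{rrd_file} is group readable and writable")
else:
    print(f"{rrd_file} is missing the group RW bits")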