pax_global_header00006660000000000000000000000064151276400020014507gustar00rootroot0000000000000052 comment=f8f7caecfd9fd416789bdf848ec0c997da863b91 prometheus-postfix-exporter-0.18.0/000077500000000000000000000000001512764000200173305ustar00rootroot00000000000000prometheus-postfix-exporter-0.18.0/.dockerignore000066400000000000000000000001021512764000200217750ustar00rootroot00000000000000# Created by .ignore support plugin (hsz.mobi) .idea/ *.iml .git/ prometheus-postfix-exporter-0.18.0/.github/000077500000000000000000000000001512764000200206705ustar00rootroot00000000000000prometheus-postfix-exporter-0.18.0/.github/CODEOWNERS000066400000000000000000000000121512764000200222540ustar00rootroot00000000000000* @hsn723 prometheus-postfix-exporter-0.18.0/.github/dependabot.yml000066400000000000000000000021261512764000200235210ustar00rootroot00000000000000version: 2 updates: - package-ecosystem: "gomod" directory: "/" schedule: interval: "daily" allow: - dependency-type: "all" groups: docker: patterns: - "^github.com/containerd/*" - "^github.com/distribution/*" - "^github.com/docker/*" - "^github.com/moby/*" - "^github.com/opencontainers/*" otel: patterns: - "^github.com/prometheus/*" - "^go.opentelemetry.io/*" golang: patterns: - "^golang.org/x/*" other: patterns: - "*" exclude-patterns: - "^github.com/containerd/*" - "^github.com/distribution/*" - "^github.com/docker/*" - "^github.com/moby/*" - "^github.com/opencontainers/*" - "^github.com/prometheus/*" - "^go.opentelemetry.io/*" - "^golang.org/x/*" - package-ecosystem: "docker" directory: "/" schedule: interval: "weekly" - package-ecosystem: "github-actions" directory: "/" schedule: interval: "daily" prometheus-postfix-exporter-0.18.0/.github/mergify.yml000066400000000000000000000015601512764000200230570ustar00rootroot00000000000000pull_request_rules: - name: Automatic merge conditions: - "#approved-reviews-by>=1" - "#review-requested=0" - "#changes-requested-reviews-by=0" actions: merge: method: squash - name: 
Automatic merge for dependabot conditions: - author~=^dependabot\[bot\]$ - or: - files~=^go\.(mod|sum)$ - files~=^Dockerfile$ - files~=^\.github/workflows/.*\.ya?ml$ - -files~=^(?!go\.(mod|sum)$|Dockerfile$|\.github/workflows/.*\.ya?ml$).* actions: review: type: APPROVE merge: method: squash - name: Automatic merge for pre-commit conditions: - author~=^pre-commit-ci\[bot\]$ - title~=^\[pre-commit\.ci\] pre-commit autoupdate$ - files~=^\.pre-commit-config\.yaml$ actions: review: type: APPROVE merge: method: squash prometheus-postfix-exporter-0.18.0/.github/release.yml000066400000000000000000000005141512764000200230330ustar00rootroot00000000000000changelog: categories: - title: Breaking Changes labels: - breaking-change - title: New Features labels: - feature - title: Bug Fixes labels: - bug - title: Dependencies and Chore labels: - dependencies - title: Other Changes labels: - "*" prometheus-postfix-exporter-0.18.0/.github/workflows/000077500000000000000000000000001512764000200227255ustar00rootroot00000000000000prometheus-postfix-exporter-0.18.0/.github/workflows/dependency-review.yml000066400000000000000000000004411512764000200270640ustar00rootroot00000000000000name: Dependency Review on: [pull_request] permissions: contents: read pull-requests: write jobs: dependency-review: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: actions/dependency-review-action@v4 with: comment-summary-in-pr: true prometheus-postfix-exporter-0.18.0/.github/workflows/helm-release.yml000066400000000000000000000014151512764000200260140ustar00rootroot00000000000000name: Release Helm chart on: push: branches: - master paths: - 'charts/postfix-exporter/Chart.yaml' permissions: contents: write jobs: release: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v6 with: fetch-depth: 0 - name: Configure Git run: | git config user.name "$GITHUB_ACTOR" git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - name: Install Helm uses: azure/setup-helm@v4 - 
name: Run chart-releaser uses: helm/chart-releaser-action@v1.7.0 with: mark_as_latest: false env: CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" CR_RELEASE_NAME_TEMPLATE: "helm-v{{ .Version }}" prometheus-postfix-exporter-0.18.0/.github/workflows/helm.yml000066400000000000000000000017751512764000200244070ustar00rootroot00000000000000name: Lint and test Helm charts on: pull_request: paths: - 'charts/**' - '!**.md' permissions: contents: read jobs: lint-test: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v6 with: fetch-depth: 0 - uses: actions/setup-python@v6 - name: Set up chart-testing uses: helm/chart-testing-action@v2.8.0 - name: Run chart-testing (list-changed) id: list-changed run: | changed=$(ct list-changed --config ct.yaml) if [[ -n "$changed" ]]; then echo "changed=true" >> $GITHUB_OUTPUT fi - name: Run chart-testing (lint) run: ct lint --config ct.yaml - name: Create kind cluster uses: helm/kind-action@v1.13.0 if: steps.list-changed.outputs.changed == 'true' - name: Run chart-testing (install) run: ct install --config ct.yaml --helm-extra-set-args "--set=deployment.extraArgs={--no-postfix.logfile_must_exist}" prometheus-postfix-exporter-0.18.0/.github/workflows/main.yml000066400000000000000000000044771512764000200244100ustar00rootroot00000000000000name: main on: push: branches: - master tags-ignore: - '*' pull_request: branches: - master env: go-version: stable permissions: contents: read jobs: build: name: Build runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - name: Setup Go uses: actions/setup-go@v6 with: go-version: ${{ env.go-version }} - name: Build run: make build lint: name: Lint runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - uses: actions/setup-python@v6 - name: Setup Go uses: actions/setup-go@v6 with: go-version: ${{ env.go-version }} - name: Lint run: make lint - name: Lint run: make lint test: name: Test runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - name: Setup Go uses: actions/setup-go@v6 
with: go-version: ${{ env.go-version }} - name: Test run: make test check-goreleaser-config: name: Check goreleaser.yml runs-on: ubuntu-latest steps: - uses: actions/checkout@v6 - name: Setup go uses: actions/setup-go@v6 with: go-version: ${{ env.go-version }} - uses: goreleaser/goreleaser-action@v6 with: version: latest args: check -f .goreleaser.yml dry-run: name: Dry-run release runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v6 with: fetch-depth: 0 - uses: docker/setup-qemu-action@v3 - uses: docker/setup-buildx-action@v3 - uses: anchore/sbom-action/download-syft@v0.21.0 - name: Setup go uses: actions/setup-go@v6 with: go-version: ${{ env.go-version }} - name: Setup latest version tag run: | echo "GORELEASER_CURRENT_TAG=$(git describe --tags --abbrev=0 --match "v*" || echo v0.0.0)" >> $GITHUB_ENV - name: Install build dependencies run: | make libsystemd-dev make gcc-aarch64-linux-gnu - name: GoReleaser uses: goreleaser/goreleaser-action@v6 with: version: latest args: --snapshot --skip=sign,publish --clean - name: Test built containers run: make container-structure-test prometheus-postfix-exporter-0.18.0/.github/workflows/release.yml000066400000000000000000000047141512764000200250760ustar00rootroot00000000000000name: Tag and release on: push: branches: - master paths: - VERSION permissions: contents: write id-token: write packages: write attestations: write jobs: tag: runs-on: ubuntu-latest outputs: release: ${{ steps.tag.outputs.release }} steps: - name: Checkout uses: actions/checkout@v6 with: fetch-depth: 0 - name: Read version id: version run: | if [ ! -f VERSION ]; then echo "VERSION file not found!" 
exit 1 fi echo "VERSION=v$(cat VERSION)" >> $GITHUB_OUTPUT - name: Check if tag exists id: tag run: | TAG="${{ steps.version.outputs.VERSION }}" if git rev-parse "$TAG" >/dev/null 2>&1; then echo "release=false" >> $GITHUB_OUTPUT else echo "release=true" >> $GITHUB_OUTPUT fi - name: Create tag if: steps.tag.outputs.release == 'true' run: | TAG="${{ steps.version.outputs.VERSION }}" git tag "$TAG" git push origin "$TAG" release: runs-on: ubuntu-latest needs: tag if: needs.tag.outputs.release == 'true' env: DOCKER_CLI_EXPERIMENTAL: "enabled" steps: - name: Checkout uses: actions/checkout@v6 with: fetch-depth: 0 - uses: docker/setup-qemu-action@v3 - uses: docker/setup-buildx-action@v3 - uses: sigstore/cosign-installer@v4.0.0 - uses: anchore/sbom-action/download-syft@v0.21.0 - name: GHCR Login uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Setup go uses: actions/setup-go@v6 with: go-version: stable - name: Install build dependencies run: | make libsystemd-dev make gcc-aarch64-linux-gnu - name: GoReleaser id: release uses: goreleaser/goreleaser-action@v6 with: version: latest args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Attest checksums uses: actions/attest-build-provenance@v3 with: subject-checksums: ./dist/checksums.txt - name: Attest image digests uses: actions/attest-build-provenance@v3 with: subject-checksums: ./dist/digests.txt prometheus-postfix-exporter-0.18.0/.gitignore000066400000000000000000000002051512764000200213150ustar00rootroot00000000000000# Editor files *~ .idea/ # Test binary, build with `go test -c` *.test # Binaries /postfix_exporter *.iml vendor/ dist/ cover.out prometheus-postfix-exporter-0.18.0/.golangci.yml000066400000000000000000000012311512764000200217110ustar00rootroot00000000000000version: "2" linters: enable: - dupl - ginkgolinter - gocheckcompilerdirectives - gocognit - goconst - gocyclo - godot - govet - revive - 
unparam settings: revive: rules: - name: dot-imports disabled: true govet: enable: - fieldalignment exclusions: generated: lax presets: - comments - common-false-positives - legacy - std-error-handling paths: - third_party$ - builtin$ - examples$ formatters: enable: - gofmt exclusions: generated: lax paths: - third_party$ - builtin$ - examples$ prometheus-postfix-exporter-0.18.0/.goreleaser.yml000066400000000000000000000065171512764000200222720ustar00rootroot00000000000000version: 2 project_name: postfix_exporter before: hooks: - go mod download builds: - id: default env: - CGO_ENABLED=0 flags: - -tags=nosystemd,nodocker goos: - linux goarch: - amd64 - arm64 - id: with-docker goarch: - amd64 - arm64 flags: - -tags=nosystemd env: - CGO_ENABLED=0 - id: with-systemd goarch: - amd64 - arm64 flags: - -tags=nodocker env: - CGO_ENABLED=1 - >- {{- if eq .Arch "arm64" }}CC=/usr/bin/aarch64-linux-gnu-gcc{{- end }} goos: - linux - id: aio goos: - linux goarch: - amd64 - arm64 env: - CGO_ENABLED=1 - >- {{- if eq .Arch "arm64" }}CC=/usr/bin/aarch64-linux-gnu-gcc{{- end }} dockers: - image_templates: - "ghcr.io/hsn723/{{.ProjectName}}:{{ .Version }}-amd64" use: buildx ids: - default dockerfile: Dockerfile extra_files: - LICENSE build_flag_templates: - "--platform=linux/amd64" - "--label=org.opencontainers.image.created={{.Date}}" - "--label=org.opencontainers.image.revision={{.FullCommit}}" - "--label=org.opencontainers.image.version={{.Version}}" - image_templates: - "ghcr.io/hsn723/{{.ProjectName}}:{{ .Version }}-arm64" use: buildx ids: - default goarch: arm64 dockerfile: Dockerfile extra_files: - LICENSE build_flag_templates: - "--platform=linux/arm64" - "--label=org.opencontainers.image.created={{.Date}}" - "--label=org.opencontainers.image.revision={{.FullCommit}}" - "--label=org.opencontainers.image.version={{.Version}}" docker_manifests: - name_template: "ghcr.io/hsn723/{{.ProjectName}}:latest" image_templates: - "ghcr.io/hsn723/{{.ProjectName}}:{{ .Version }}-amd64" - 
"ghcr.io/hsn723/{{.ProjectName}}:{{ .Version }}-arm64" - name_template: "ghcr.io/hsn723/{{.ProjectName}}:{{ .Version }}" image_templates: - "ghcr.io/hsn723/{{.ProjectName}}:{{ .Version }}-amd64" - "ghcr.io/hsn723/{{.ProjectName}}:{{ .Version }}-arm64" - name_template: "ghcr.io/hsn723/{{.ProjectName}}:{{ .Major }}.{{ .Minor }}" image_templates: - "ghcr.io/hsn723/{{.ProjectName}}:{{ .Version }}-amd64" - "ghcr.io/hsn723/{{.ProjectName}}:{{ .Version }}-arm64" archives: - id: default ids: - default - id: with-docker ids: - with-docker name_template: "{{ .ProjectName }}_docker_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - id: with-systemd ids: - with-systemd name_template: "{{ .ProjectName }}_systemd_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - id: aio ids: - aio name_template: "{{ .ProjectName }}_aio_{{ .Version }}_{{ .Os }}_{{ .Arch }}" checksum: name_template: 'checksums.txt' snapshot: version_template: "{{ .Tag }}-next" changelog: use: github-native source: enabled: true sboms: - id: archive artifacts: archive - id: source artifacts: source signs: - cmd: cosign signature: '${artifact}.sigstore.json' args: - sign-blob - '--yes' - '--bundle=${signature}' - '${artifact}' artifacts: all output: true docker_signs: - cmd: cosign artifacts: all output: true args: - 'sign' - '--yes' - '${artifact}' docker_digest: name_template: 'digests.txt' prometheus-postfix-exporter-0.18.0/.pre-commit-config.yaml000066400000000000000000000010551512764000200236120ustar00rootroot00000000000000repos: - repo: https://github.com/golangci/golangci-lint rev: v2.0.2 hooks: - id: golangci-lint - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: check-merge-conflict - id: check-yaml args: ["--allow-multiple-documents"] exclude: charts/postfix-exporter/templates/ - id: end-of-file-fixer - id: mixed-line-ending args: ["--fix=lf"] ci: autofix_prs: false skip: [golangci-lint, check-merge-conflict, check-yaml, end-of-file-fixer, mixed-line-ending] 
prometheus-postfix-exporter-0.18.0/Dockerfile000066400000000000000000000004441512764000200213240ustar00rootroot00000000000000FROM scratch LABEL org.opencontainers.image.source="https://github.com/hsn723/postfix_exporter" \ org.opencontainers.image.authors="Hsn723" \ org.opencontainers.image.title="postfix_exporter" EXPOSE 9154 COPY postfix_exporter / COPY LICENSE / ENTRYPOINT ["/postfix_exporter"] prometheus-postfix-exporter-0.18.0/LICENSE000066400000000000000000000261351512764000200203440ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. prometheus-postfix-exporter-0.18.0/Makefile000066400000000000000000000027451512764000200210000ustar00rootroot00000000000000LDFLAGS = -ldflags "-s -w" BINDIR = $(shell pwd)/bin YQ = $(BINDIR)/yq .PHONY: libsystemd-dev libsystemd-dev: @dpkg -s $@ >/dev/null 2>&1 || sudo apt-get update && sudo apt-get install -y --no-install-recommends $@ .PHONY: gcc-aarch64-linux-gnu gcc-aarch64-linux-gnu: @dpkg -s $@ >/dev/null 2>&1 || sudo apt-get update && sudo apt-get install -y $@ .PHONY: test test: libsystemd-dev go test -coverprofile cover.out -count=1 -race -p 4 -v ./... .PHONY: lint lint: libsystemd-dev if [ -z "$(shell which pre-commit)" ]; then pip3 install pre-commit; fi pre-commit install pre-commit run --all-files .PHONY: build build: libsystemd-dev go build $(LDFLAGS) . 
$(BINDIR): mkdir -p $(BINDIR) CONTAINER_STRUCTURE_TEST = $(BINDIR)/container-structure-test .PHONY: $(CONTAINER_STRUCTURE_TEST) $(CONTAINER_STRUCTURE_TEST): $(BINDIR) curl -sSLf -o $(CONTAINER_STRUCTURE_TEST) https://github.com/GoogleContainerTools/container-structure-test/releases/latest/download/container-structure-test-linux-amd64 && chmod +x $(CONTAINER_STRUCTURE_TEST) .PHONY: container-structure-test container-structure-test: $(CONTAINER_STRUCTURE_TEST) $(YQ) $(YQ) '.builds[] | select(.id == "default") | .goarch[]' .goreleaser.yml | xargs -I {} $(CONTAINER_STRUCTURE_TEST) test --image ghcr.io/hsn723/postfix_exporter:$(shell git describe --tags --abbrev=0 --match "v*" || echo v0.0.0)-next-{} --platform linux/{} --config cst.yaml .PHONY: $(YQ) $(YQ): $(BINDIR) GOBIN=$(BINDIR) go install github.com/mikefarah/yq/v4@latest prometheus-postfix-exporter-0.18.0/README.md000066400000000000000000000273751512764000200206250ustar00rootroot00000000000000[![GitHub release](https://img.shields.io/github/release/hsn723/postfix_exporter.svg?sort=semver&maxAge=60)](https://github.com/hsn723/postfix_exporter/releases) [![Helm release](https://img.shields.io/badge/dynamic/yaml.svg?label=chart&url=https://hsn723.github.io/postfix_exporter/index.yaml&query=$.entries[%22postfix-exporter%22][0].version&colorB=orange&logo=helm)](https://github.com/hsn723/postfix_exporter/releases) [![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/postfix-exporter)](https://artifacthub.io/packages/helm/postfix-exporter/postfix-exporter) [![main](https://github.com/Hsn723/postfix_exporter/actions/workflows/main.yml/badge.svg?branch=master)](https://github.com/Hsn723/postfix_exporter/actions/workflows/main.yml) [![PkgGoDev](https://pkg.go.dev/badge/github.com/hsn723/postfix_exporter?tab=overview)](https://pkg.go.dev/github.com/hsn723/postfix_exporter?tab=overview) [![Go Report 
Card](https://goreportcard.com/badge/github.com/hsn723/postfix_exporter)](https://goreportcard.com/report/github.com/hsn723/postfix_exporter) ![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hsn723/postfix_exporter) # Prometheus Postfix exporter Prometheus metrics exporter for [the Postfix mail server](http://www.postfix.org/). This exporter provides histogram metrics for the size and age of messages stored in the mail queue. It extracts these metrics from Postfix by connecting to a UNIX socket under `/var/spool`. It also counts events by parsing Postfix's log entries, using regular expression matching. The log entries are retrieved from the systemd journal, the Docker logs, or from a log file. The last version of this exporter that supports the EOL Postfix 2.x is 0.14.0. ## Options These options can be used when starting the `postfix_exporter` | Flag | Description | Default | |--------------------------|------------------------------------------------------|-----------------------------------| | `--web.listen-address` | Address to listen on for web interface and telemetry | `9154` | | `--web.config.file ` | Path to configuration file that can enable TLS or authentication [(ref)](https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md). | `""` | | `--web.telemetry-path` | Path under which to expose metrics | `/metrics` | | `--postfix.showq_path` | Path at which Postfix places its showq socket | `/var/spool/postfix/public/showq` | | `--postfix.showq_port` | TCP port at which showq is listening | `10025` | | `--postfix.showq_network` | Network protocol to use to connect to showq | `"unix"` | | `--postfix.logfile_path` | Path where Postfix writes log entries | `/var/log/mail.log` | | `--postfix.logfile_must_exist` | Fail if the log file doesn't exist. | `true` | | `--postfix.logfile_poll` | Poll for file changes instead of using inotify. 
| `false` | | `--postfix.logfile_debug` | Enable debug logging for the log file. | `false` | | `--postfix.cleanup_service_label` | User-defined service labels for the cleanup service. | `cleanup` | | `--postfix.lmtp_service_label` | User-defined service labels for the lmtp service. | `lmtp` | | `--postfix.pipe_service_label` | User-defined service labels for the pipe service. | `pipe` | | `--postfix.qmgr_service_label` | User-defined service labels for the qmgr service. | `qmgr` | | `--postfix.smtp_service_label` | User-defined service labels for the smtp service. | `smtp` | | `--postfix.smtpd_service_label` | User-defined service labels for the smtpd service. | `smtpd` | | `--postfix.bounce_service_label` | User-defined service labels for the bounce service. | `bounce` | | `--postfix.virtual_service_label` | User-defined service labels for the virtual service. | `virtual` | | `--log.unsupported` | Log all unsupported lines | `false` | | `--log.level` | Logging level | `"info"` | | `--log.format` | Logging format | `"logfmt"` | | `--docker.enable` | Read from the Docker logs instead of a file | `false` | | `--docker.container.id` | The container to read Docker logs from | `postfix` | | `--systemd.enable` | Read from the systemd journal instead of file | `false` | | `--systemd.unit` | Name of the Postfix systemd unit | `postfix.service` | | `--systemd.slice` | Name of the Postfix systemd slice. 
| `""` | | `--systemd.journal_path` | Path to the systemd journal | `""` | | `--kubernetes.enable` | Read from Kubernetes pod logs instead of log | `false` | | `--kubernetes.namespace` | Kubernetes namespace to read logs from | current namespace (in-cluster) or "default" | | `--kubernetes.pod-name` | Specific pod name to read logs from | `""` | | `--kubernetes.service ` | Name of the service selecting the postfix pods | `""` | | `--kubernetes.container` | Container name to read logs from (default: all) | `""` | | `--kubernetes.kubeconfig` | Path to kubeconfig file if out of cluster | `"~/.kube/config"` | - The `--docker.*` flags are not available for binaries built with the `nodocker` build tag - The `--systemd.*` flags are not available for binaries built with the `nosystemd` build tag - The `--kubernetes.*` flags are not available for binaries built with the `nokubernetes` build tag ### User-defined service labels In postfix, services can be configured multiple times and appear with labels that do not match their service types. For instance, all the services defined below are valid services of type `smtp` having different labels. ```sh # master.cf smtp unix - - n - - smtp relay unix - - n - - smtp -o syslog_name=postfix/relay/smtp encrypt unix - - n - - smtp -o smtp_tls_security_level=encrypt ... ``` User-defined service labels, not service types show up in logs. It is therefore necessary to indicate to postfix_exporter how those service labels are mapped to their relevant service type. This can be done with the `--postfix.${SERVICE_TYPE}_service_labels` command-line flags. For instance, for the above `master.cf` example postfix_exporter should be called with all the relevant service labels defined. 
For example: ```sh ./postfix_exporter --postfix.smtp_service_label smtp \ --postfix.smtp_service_label relay/smtp \ --postfix.smtp_service_label encrypt ``` ## (experimental) Connecting to remote showq instances Instead of connecting to a local socket to extract metrics from a local showq instance, postfix_exporter can connect to a remote showq instance via TCP. Exposing a TCP port for the showq service can be dangerous and extreme caution must be taken to avoid unintentional/unauthorized access to showq, as this will expose sensitive information. ## Events from Docker If postfix_exporter is built with docker support, postfix servers running in a [Docker](https://www.docker.com/) container can be monitored using the `--docker.enable` flag. The default container ID is `postfix`, but can be customized with the `--docker.container.id` flag. The default is to connect to the local Docker, but this can be customized using [the `DOCKER_HOST` and similar](https://pkg.go.dev/github.com/docker/docker/client?tab=doc#NewEnvClient) environment variables. ## Events from log file The log file is tailed when processed. Rotating the log files while the exporter is running is OK. The path to the log file is specified with the `--postfix.logfile_path` flag. ## Events from systemd Retrieval from the systemd journal is enabled with the `--systemd.enable` flag. This overrides the log file setting. It is possible to specify the unit (with `--systemd.unit`) or slice (with `--systemd.slice`). Additionally, it is possible to read the journal from a directory with the `--systemd.journal_path` flag. ## Events from kubernetes (experimental) If postfix_exporter is built with kubernetes support, a postfix_exporter pod can follow logs from one or more postfix pods in a kubernetes environment using the `--kubernetes.enable` flag. A service name (`--kubernetes.service`) or pod name (`--kubernetes.pod-name`) must be specified to select the pod(s) to track. 
If more than one container runs in the pod, it is also possible to explicitly specify the container postfix runs in using `--kubernetes.container`. As pods are remote to the postfix_exporter instance, it is necessary to connect to showq using TCP. To do so, the service name of the kubernetes service selecting postfix pods must be specified via `--kubernetes.service`. If `--kubernetes.namespace` is not specified, by default the namespace in which postfix_exporter runs is used. If postfix_exporter is running out of cluster (ex: locally), the `"default"` namespace is used and a custom kubeconfig location can be specified via `--kubernetes.kubeconfig`. For postfix_exporter to be able to follow logs from postfix pods, it must also be given the appropriate RBACs. For example: ```yaml apiVersion: v1 kind: ServiceAccount metadata: name: postfix --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: postfix rules: - apiGroups: [""] resources: ["pods", "pods/log", "services"] verbs: ["get", "list", "watch"] - apiGroups: ["apps"] resources: ["statefulsets", "replicasets", "daemonsets"] verbs: ["get", "list", "watch"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: postfix roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: postfix subjects: - kind: ServiceAccount name: postfix namespace: postfix ``` ## Build options By default, the exporter is built without docker and systemd support. ```sh go build -tags nosystemd,nodocker ``` To build the exporter with support for docker or systemd, remove the relevant build build tag from the build arguments. Note that systemd headers are required for building with systemd. On Debian-based systems, this is typically achieved by installing the `libsystemd-dev` APT package. ``` go build -tags nosystemd ``` ## Releases Signed container images are provided from the GitHub Container Registry (https://github.com/Hsn723/postfix_exporter/pkgs/container/postfix_exporter). 
The binary included in container images is built without docker and systemd support. The [Releases](https://github.com/Hsn723/postfix_exporter/releases) page includes signed pre-built binaries for various configurations. - postfix_exporter binaries are minimal builds (docker and systemd support excluded) - postfix_exporter_docker binaries have docker support built-in - postfix_exporter_systemd binaries have systemd support built-in - postfix_exporter_aio binaries are built with everything included, which can be useful for packaging for systems where the final use-case is not known in advance prometheus-postfix-exporter-0.18.0/VERSION000066400000000000000000000000071512764000200203750ustar00rootroot000000000000000.18.0 prometheus-postfix-exporter-0.18.0/charts/000077500000000000000000000000001512764000200206145ustar00rootroot00000000000000prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/000077500000000000000000000000001512764000200241565ustar00rootroot00000000000000prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/.helmignore000066400000000000000000000005361512764000200263140ustar00rootroot00000000000000 # Patterns to ignore when building packages. # This supports shell glob matching, relative path matching, and # negation (prefixed with !). Only one pattern per line. 
.DS_Store # Common VCS dirs .git/ .gitignore .bzr/ .bzrignore .hg/ .hgignore .svn/ # Common backup files *.swp *.bak *.tmp *.orig *~ # Various IDEs .project .idea/ *.tmproj .vscode/ prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/Chart.yaml000066400000000000000000000002751512764000200261070ustar00rootroot00000000000000apiVersion: v2 name: postfix-exporter description: A Prometheus exporter for Postfix mail server metrics type: application version: 0.1.3 appVersion: "0.16.1" maintainers: - name: hsn723 prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/README.md000066400000000000000000000056111512764000200254400ustar00rootroot00000000000000# postfix-exporter Helm Chart ## Quick start ### Add the Helm Repository ```sh helm repo add postfix-exporter https://hsn723.github.io/postfix_exporter helm repo update ``` ### Install the Chart Installing the chart with default settings (standalone): ```sh helm install --create-namespace --namespace postfix-exporter postfix-exporter postfix-exporter/postfix-exporter ``` Specify parameters using `--set key=value[,key=value]` arguments to `helm install`, or provide your own `values.yaml`: ```sh helm install --create-namespace --namespace postfix-exporter postfix-exporter -f values.yaml postfix-exporter/postfix-exporter ``` ## Values | Key | Type | Default | Description | |------------------------------------------|--------|---------------------------------------------|-------------------------------------------| | serviceAccountName | string | "postfix" | The name for the service account | | postfixServiceName | string | "" | The name for the postfix Service | | useTCPShowq | bool | `true` | The name for the postfix Service | | createRbac | bool | `true` | Whether to create RBAC resources | | image.repository | string | `"ghcr.io/hsn723/postfix_exporter"` | Image repository to use | | image.tag | string | `{{ .Chart.AppVersion }}` | Image tag to use | | image.pullPolicy | string | "Always" | Image pullPolicy | | 
image.pullSecrets | list | `[]` | Image pull secret(s) | | deployment.replicas | int | `2` | Number of controller Pod replicas | | deployment.metricsPort | int | `9154` | The metrics server port | | deployment.resources | object | `{"requests":{"cpu":100m,"memory":"20Mi"}}` | Resources requested for Deployment | | deployment.terminationGracePeriodSeconds | int | `10` | terminationGracePeriodSeconds for the Pod | | deployment.extraArgs | list | `[]` | Additional arguments for the controller | ## Generate Manifests ```sh helm template --namespace postfix_exporter postfix_exporter [-f values.yaml] postfix_exporter/postfix_exporter ``` prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/templates/000077500000000000000000000000001512764000200261545ustar00rootroot00000000000000prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/templates/_helpers.tpl000066400000000000000000000032551512764000200305030ustar00rootroot00000000000000{{/* Expand the name of the chart. */}} {{- define "project.name" -}} {{- default .Chart.Name | trunc 63 | trimSuffix "-" | replace "_" "-"}} {{- end }} {{/* Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. */}} {{- define "project.fullname" -}} {{- $name := default .Chart.Name }} {{- if contains $name .Release.Name }} {{- .Release.Name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} {{- end }} {{/* Create a namespaced app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. 
*/}} {{- define "project.namespacedname" -}} {{- $name := default .Chart.Name }} {{- if contains $name .Release.Name }} {{- printf "%s/%s" .Release.Namespace .Release.Name | trunc 63 | trimSuffix "-" }} {{- else }} {{- printf "%s/%s-%s" .Release.Namespace .Release.Name $name | trunc 63 | trimSuffix "-" }} {{- end }} {{- end }} {{/* Create chart name and version as used by the chart label. */}} {{- define "project.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} {{- define "project.labels" -}} helm.sh/chart: {{ include "project.chart" . }} app.kubernetes.io/name: {{ include "project.name" . }} {{- if .Chart.AppVersion }} app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} {{- end }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/templates/deployment.yaml000066400000000000000000000046261512764000200312300ustar00rootroot00000000000000apiVersion: apps/v1 kind: Deployment metadata: name: {{ template "project.name" . }} namespace: {{ .Release.Namespace }} labels: {{- include "project.labels" . | nindent 4 }} spec: selector: matchLabels: app.kubernetes.io/name: {{ include "project.name" . }} replicas: {{ .Values.deployment.replicas }} template: metadata: labels: app.kubernetes.io/name: {{ include "project.name" . }} spec: serviceAccountName: {{ .Values.serviceAccountName }} terminationGracePeriodSeconds: {{ .Values.deployment.terminationGracePeriodSeconds }} securityContext: runAsNonRoot: true containers: - name: postfix-exporter image: "{{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}" {{- with .Values.image.pullPolicy }} imagePullPolicy: {{ . 
}} {{- end }} ports: - containerPort: {{ .Values.deployment.metricsPort }} name: metrics protocol: TCP args: - --kubernetes.enable {{ if .Values.postfixServiceName }} - --kubernetes.service={{ .Values.postfixServiceName }} {{ end }} {{ if .Values.useTCPShowq }} - --postfix.showq_network=tcp {{ end }} {{- range .Values.deployment.extraArgs }} - {{ . }} {{- end }} {{- with .Values.deployment.resources }} resources: {{ toYaml . | nindent 12 }} {{- end }} securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsUser: 65534 livenessProbe: httpGet: path: / port: metrics initialDelaySeconds: 15 periodSeconds: 20 readinessProbe: httpGet: path: / port: metrics initialDelaySeconds: 5 periodSeconds: 10 {{- if .Values.image.pullSecrets }} imagePullSecrets: {{- range .Values.image.pullSecrets }} - name: {{ . }} {{- end }} {{- end }} {{- with .Values.deployment.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.deployment.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.deployment.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/templates/rbac.yaml000066400000000000000000000022651512764000200277540ustar00rootroot00000000000000{{- if .Values.createRbac }} apiVersion: v1 kind: ServiceAccount metadata: name: {{ .Values.serviceAccountName }} namespace: {{ .Release.Namespace }} labels: {{- include "project.labels" . | nindent 4 }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: "{{ .Values.serviceAccountName }}-role" namespace: {{ .Release.Namespace }} labels: {{- include "project.labels" . 
| nindent 4 }} rules: - apiGroups: - "" resources: - services verbs: - get - apiGroups: - "" resources: - pods verbs: - get - list - apiGroups: - "" resources: - pods/log verbs: - get - list - watch - apiGroups: - "apps" resources: - statefulsets - replicasets - daemonsets verbs: - get --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: "{{ .Values.serviceAccountName }}-rolebinding" namespace: {{ .Release.Namespace }} labels: {{- include "project.labels" . | nindent 4 }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: "{{ .Values.serviceAccountName }}-role" subjects: - kind: ServiceAccount name: {{ .Values.serviceAccountName }} namespace: {{ .Release.Namespace }} {{- end }} prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/templates/service.yaml000066400000000000000000000005751512764000200305070ustar00rootroot00000000000000apiVersion: v1 kind: Service metadata: name: {{ template "project.name" . }} namespace: {{ .Release.Namespace }} labels: {{- include "project.labels" . | nindent 4 }} spec: ports: - name: metrics port: {{ .Values.deployment.metricsPort }} targetPort: {{ .Values.deployment.metricsPort }} selector: app.kubernetes.io/name: {{ include "project.name" . }} prometheus-postfix-exporter-0.18.0/charts/postfix-exporter/values.yaml000066400000000000000000000023161512764000200263430ustar00rootroot00000000000000image: # image.repository -- postfix_exporter image repository to use. repository: ghcr.io/hsn723/postfix_exporter # image.tag -- postfix_exporter image tag to use. # @default -- `{{ .Chart.AppVersion }}` tag: # 0.1.0 # image.pullPolicy -- postfix_exporter image pullPolicy. pullPolicy: # Always # image.pullSecrets -- Specify imagePullSecrets. # pullSecrets: deployment: # deployment.replicas -- Specify the number of replicas of the Deployment. replicas: 2 # deployment.metricsPort -- Specify the metrics port of the Deployment. 
metricsPort: 9154 # deployment.extraArgs -- Specify extra arguments to pass to postfix_exporter. # extraArgs: # deployment.resources -- Specify resources. resources: requests: cpu: 100m memory: 20Mi # deployment.terminationGracePeriodSeconds -- Specify terminationGracePeriodSeconds. terminationGracePeriodSeconds: 10 # serviceAccountName -- Specify the ServiceAccount name. serviceAccountName: postfix # postfixServiceName -- Specify the Postfix service name to monitor. # postfixServiceName: # createRbac -- Whether to create RBAC resources. createRbac: true # useTCPShowq -- Whether to use TCP for showq command. useTCPShowq: true prometheus-postfix-exporter-0.18.0/cst.yaml000066400000000000000000000007631512764000200210130ustar00rootroot00000000000000schemaVersion: "2.0.0" fileExistenceTests: - name: "postfix_exporter" path: "/postfix_exporter" shouldExist: true permissions: "-rwxr-xr-x" metadataTest: entrypoint: ["/postfix_exporter"] labels: - key: "org.opencontainers.image.authors" value: "Hsn723" - key: "org.opencontainers.image.title" value: "postfix_exporter" - key: "org.opencontainers.image.source" value: "https://github.com/hsn723/postfix_exporter" licenseTests: - files: ["/LICENSE"] prometheus-postfix-exporter-0.18.0/ct.yaml000066400000000000000000000001171512764000200206210ustar00rootroot00000000000000target-branch: master validate-maintainers: true check-version-increment: true prometheus-postfix-exporter-0.18.0/exporter/000077500000000000000000000000001512764000200212005ustar00rootroot00000000000000prometheus-postfix-exporter-0.18.0/exporter/postfix_exporter.go000066400000000000000000000661001512764000200251560ustar00rootroot00000000000000// Copyright 2017 Kumina, https://kumina.nl/ // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package exporter import ( "context" "log/slog" "regexp" "slices" "strconv" "strings" "sync" "github.com/hsn723/postfix_exporter/logsource" "github.com/hsn723/postfix_exporter/showq" "github.com/prometheus/client_golang/prometheus" ) // PostfixExporter holds the state that should be preserved by the // Postfix Prometheus metrics exporter across scrapes. type PostfixExporter struct { qmgrInsertsSize prometheus.Histogram virtualDelivered prometheus.Counter bounceNonDelivery prometheus.Counter smtpConnectionTimedOut prometheus.Counter // same as smtpProcesses{status=deferred}, kept for compatibility smtpStatusDeferred prometheus.Counter // should be the same as smtpProcesses{status=deferred}, kept for compatibility, but this doesn't work ! 
smtpDeferreds prometheus.Counter smtpdSASLAuthenticationFailures prometheus.Counter smtpdFCrDNSErrors prometheus.Counter smtpdDisconnects prometheus.Counter smtpdConnects prometheus.Counter cleanupProcesses prometheus.Counter cleanupRejects prometheus.Counter cleanupNotAccepted prometheus.Counter qmgrExpires prometheus.Counter qmgrRemoves prometheus.Counter qmgrInsertsNrcpt prometheus.Histogram qmgrInsertsNrcptLegacy prometheus.Histogram logSrc logsource.LogSource smtpdLostConnections *prometheus.CounterVec smtpDeferredDSN *prometheus.CounterVec smtpdProcesses *prometheus.CounterVec smtpdRejects *prometheus.CounterVec smtpdTLSConnects *prometheus.CounterVec lmtpDelays *prometheus.HistogramVec pipeDelays *prometheus.HistogramVec smtpDelays *prometheus.HistogramVec smtpTLSConnects *prometheus.CounterVec smtpBouncedDSN *prometheus.CounterVec smtpProcesses *prometheus.CounterVec opendkimSignatureAdded *prometheus.CounterVec unsupportedLogEntries *prometheus.CounterVec postfixUp *prometheus.GaugeVec showq *showq.Showq bounceLabels []string cleanupLabels []string smtpLabels []string smtpdLabels []string virtualLabels []string qmgrLabels []string pipeLabels []string lmtpLabels []string once sync.Once logUnsupportedLines bool } // ServiceLabel is a function to apply user-defined service labels to PostfixExporter. type ServiceLabel func(*PostfixExporter) // Patterns for parsing log messages. 
var ( logLine = regexp.MustCompile(` ?(postfix|opendkim)(/([\w_\.+/-]+))?\[\d+\]: ((?:(warning|error|fatal|panic): )?.*)`) lmtpPipeSMTPLine = regexp.MustCompile(`, relay=(\S+), .*, delays=([0-9\.]+)/([0-9\.]+)/([0-9\.]+)/([0-9\.]+), `) qmgrInsertLine = regexp.MustCompile(`:.*, size=(\d+), nrcpt=(\d+) `) qmgrExpiredLine = regexp.MustCompile(`:.*, status=(expired|force-expired), returned to sender`) smtpStatusLine = regexp.MustCompile(`, status=(\w+) `) smtpDSNLine = regexp.MustCompile(`, dsn=(\d\.\d+\.\d+)`) smtpTLSLine = regexp.MustCompile(`^(\S+) TLS connection established to \S+: (\S+) with cipher (\S+) \((\d+)/(\d+) bits\)`) smtpConnectionTimedOut = regexp.MustCompile(`^connect\s+to\s+(.*)\[(.*)\]:(\d+):\s+(Connection timed out)$`) smtpdFCrDNSErrorsLine = regexp.MustCompile(`^warning: hostname \S+ does not resolve to address `) smtpdProcessesSASLLine = regexp.MustCompile(`: client=.*, sasl_method=(\S+)`) smtpdRejectsLine = regexp.MustCompile(`^NOQUEUE: reject: RCPT from \S+: ([0-9]+) `) smtpdLostConnectionLine = regexp.MustCompile(`^(?:NOQUEUE: )?lost connection after (\w+) from `) smtpdSASLAuthenticationFailuresLine = regexp.MustCompile(`^warning: \S+: SASL \S+ authentication failed: `) smtpdTLSLine = regexp.MustCompile(`^(\S+) TLS connection established from \S+: (\S+) with cipher (\S+) \((\d+)/(\d+) bits\)`) opendkimSignatureAdded = regexp.MustCompile(`^[\w\d]+: DKIM-Signature field added \(s=(\w+), d=(.*)\)$`) bounceNonDeliveryLine = regexp.MustCompile(`: sender non-delivery notification: `) ) func (e *PostfixExporter) collectFromPostfixLogLine(line, subprocess, level, remainder string) { switch { case slices.Contains(e.cleanupLabels, subprocess): e.collectCleanupLog(line, remainder, level) case slices.Contains(e.lmtpLabels, subprocess): e.collectLMTPLog(line, remainder, level) case slices.Contains(e.pipeLabels, subprocess): e.collectPipeLog(line, remainder, level) case slices.Contains(e.qmgrLabels, subprocess): e.collectQmgrLog(line, remainder, level) case 
slices.Contains(e.smtpLabels, subprocess): e.collectSMTPLog(line, remainder, level) case slices.Contains(e.smtpdLabels, subprocess): e.collectSMTPdLog(line, remainder, level) case slices.Contains(e.bounceLabels, subprocess): e.collectBounceLog(line, remainder, level) case slices.Contains(e.virtualLabels, subprocess): e.collectVirtualLog(line, remainder, level) default: e.addToUnsupportedLine(line, subprocess, level) } } func (e *PostfixExporter) collectCleanupLog(line, remainder, level string) { switch { case strings.Contains(remainder, ": message-id=<"): e.cleanupProcesses.Inc() case strings.Contains(remainder, ": reject: "): e.cleanupRejects.Inc() default: e.addToUnsupportedLine(line, "cleanup", level) } } func (e *PostfixExporter) collectLMTPLog(line, remainder, level string) { lmtpMatches := lmtpPipeSMTPLine.FindStringSubmatch(remainder) if lmtpMatches == nil { e.addToUnsupportedLine(line, "lmtp", level) return } addToHistogramVec(e.lmtpDelays, lmtpMatches[2], "LMTP pdelay", "before_queue_manager") addToHistogramVec(e.lmtpDelays, lmtpMatches[3], "LMTP adelay", "queue_manager") addToHistogramVec(e.lmtpDelays, lmtpMatches[4], "LMTP sdelay", "connection_setup") addToHistogramVec(e.lmtpDelays, lmtpMatches[5], "LMTP xdelay", "transmission") } func (e *PostfixExporter) collectPipeLog(line, remainder, level string) { pipeMatches := lmtpPipeSMTPLine.FindStringSubmatch(remainder) if pipeMatches == nil { e.addToUnsupportedLine(line, "pipe", level) return } addToHistogramVec(e.pipeDelays, pipeMatches[2], "PIPE pdelay", pipeMatches[1], "before_queue_manager") addToHistogramVec(e.pipeDelays, pipeMatches[3], "PIPE adelay", pipeMatches[1], "queue_manager") addToHistogramVec(e.pipeDelays, pipeMatches[4], "PIPE sdelay", pipeMatches[1], "connection_setup") addToHistogramVec(e.pipeDelays, pipeMatches[5], "PIPE xdelay", pipeMatches[1], "transmission") } func (e *PostfixExporter) collectQmgrLog(line, remainder, level string) { qmgrInsertMatches := 
qmgrInsertLine.FindStringSubmatch(remainder) switch { case qmgrInsertMatches != nil: addToHistogram(e.qmgrInsertsSize, qmgrInsertMatches[1], "QMGR size") addToHistogram(e.qmgrInsertsNrcptLegacy, qmgrInsertMatches[2], "QMGR nrcpt") addToHistogram(e.qmgrInsertsNrcpt, qmgrInsertMatches[2], "QMGR nrcpt") case strings.HasSuffix(remainder, ": removed"): e.qmgrRemoves.Inc() case qmgrExpiredLine.MatchString(remainder): e.qmgrExpires.Inc() default: e.addToUnsupportedLine(line, "qmgr", level) } } func (e *PostfixExporter) collectSMTPLog(line, remainder, level string) { if smtpMatches := lmtpPipeSMTPLine.FindStringSubmatch(remainder); smtpMatches != nil { addToHistogramVec(e.smtpDelays, smtpMatches[2], "before_queue_manager", "") addToHistogramVec(e.smtpDelays, smtpMatches[3], "queue_manager", "") addToHistogramVec(e.smtpDelays, smtpMatches[4], "connection_setup", "") addToHistogramVec(e.smtpDelays, smtpMatches[5], "transmission", "") e.collectSMTPStatusLog(remainder) } else if smtpTLSMatches := smtpTLSLine.FindStringSubmatch(remainder); smtpTLSMatches != nil { e.smtpTLSConnects.WithLabelValues(smtpTLSMatches[1:]...).Inc() } else if connectionTimedOutMatches := smtpConnectionTimedOut.FindStringSubmatch(remainder); connectionTimedOutMatches != nil { e.smtpConnectionTimedOut.Inc() } else { e.addToUnsupportedLine(line, "smtp", level) } } func (e *PostfixExporter) collectSMTPStatusLog(remainder string) { smtpStatusMatches := smtpStatusLine.FindStringSubmatch(remainder) if smtpStatusMatches == nil { return } e.smtpProcesses.WithLabelValues(smtpStatusMatches[1]).Inc() dsnMatches := smtpDSNLine.FindStringSubmatch(remainder) switch smtpStatusMatches[1] { case "deferred": e.smtpStatusDeferred.Inc() if dsnMatches != nil { e.smtpDeferredDSN.WithLabelValues(dsnMatches[1]).Inc() } case "bounced": if dsnMatches != nil { e.smtpBouncedDSN.WithLabelValues(dsnMatches[1]).Inc() } } } func (e *PostfixExporter) collectSMTPdLog(line, remainder, level string) { if strings.HasPrefix(remainder, 
"connect from ") { e.smtpdConnects.Inc() } else if strings.HasPrefix(remainder, "disconnect from ") { e.smtpdDisconnects.Inc() } else if smtpdFCrDNSErrorsLine.MatchString(remainder) { e.smtpdFCrDNSErrors.Inc() } else if smtpdLostConnectionMatches := smtpdLostConnectionLine.FindStringSubmatch(remainder); smtpdLostConnectionMatches != nil { e.smtpdLostConnections.WithLabelValues(smtpdLostConnectionMatches[1]).Inc() } else if smtpdProcessesSASLMatches := smtpdProcessesSASLLine.FindStringSubmatch(remainder); smtpdProcessesSASLMatches != nil { e.smtpdProcesses.WithLabelValues(strings.ReplaceAll(smtpdProcessesSASLMatches[1], ",", "")).Inc() } else if strings.Contains(remainder, ": client=") { e.smtpdProcesses.WithLabelValues("NONE").Inc() } else if smtpdRejectsMatches := smtpdRejectsLine.FindStringSubmatch(remainder); smtpdRejectsMatches != nil { e.smtpdRejects.WithLabelValues(smtpdRejectsMatches[1]).Inc() } else if smtpdSASLAuthenticationFailuresLine.MatchString(remainder) { e.smtpdSASLAuthenticationFailures.Inc() } else if smtpdTLSMatches := smtpdTLSLine.FindStringSubmatch(remainder); smtpdTLSMatches != nil { e.smtpdTLSConnects.WithLabelValues(smtpdTLSMatches[1:]...).Inc() } else { e.addToUnsupportedLine(line, "smtpd", level) } } func (e *PostfixExporter) collectBounceLog(line, remainder, level string) { bounceMatches := bounceNonDeliveryLine.FindStringSubmatch(remainder) if bounceMatches == nil { e.addToUnsupportedLine(line, "postfix", level) return } e.bounceNonDelivery.Inc() } func (e *PostfixExporter) collectVirtualLog(line, remainder, level string) { if strings.HasSuffix(remainder, ", status=sent (delivered to maildir)") { e.virtualDelivered.Inc() } else { e.addToUnsupportedLine(line, "postfix", level) } } // CollectFromLogline collects metrict from a Postfix log line. func (e *PostfixExporter) CollectFromLogLine(line string) { if line == "" { return } // Strip off timestamp, hostname, etc. 
logMatches := logLine.FindStringSubmatch(line) if logMatches == nil { // Unknown log entry format. e.addToUnsupportedLine(line, "", "") return } process := logMatches[1] level := logMatches[5] remainder := logMatches[4] switch process { case "postfix": subprocess := logMatches[3] e.collectFromPostfixLogLine(line, subprocess, level, remainder) case "opendkim": if opendkimMatches := opendkimSignatureAdded.FindStringSubmatch(remainder); opendkimMatches != nil { e.opendkimSignatureAdded.WithLabelValues(opendkimMatches[1], opendkimMatches[2]).Inc() } else { e.addToUnsupportedLine(line, process, level) } default: // Unknown log entry format. e.addToUnsupportedLine(line, process, level) } } func (e *PostfixExporter) addToUnsupportedLine(line string, subprocess string, level string) { if e.logUnsupportedLines { slog.Warn("Unsupported Line", "line", line) } e.unsupportedLogEntries.WithLabelValues(subprocess, level).Inc() } func addToHistogram(h prometheus.Histogram, value, fieldName string) { float, err := strconv.ParseFloat(value, 64) if err != nil { slog.Error("Couldn't convert value for histogram", "value", value, "field", fieldName, "error", err) } h.Observe(float) } func addToHistogramVec(h *prometheus.HistogramVec, value, fieldName string, labels ...string) { float, err := strconv.ParseFloat(value, 64) if err != nil { slog.Error("Couldn't convert value for histogram vector", "value", value, "field", fieldName, "error", err) } h.WithLabelValues(labels...).Observe(float) } var ( defaultCleanupLabels = []string{"cleanup"} defaultLmtpLabels = []string{"lmtp"} defaultPipeLabels = []string{"pipe"} defaultQmgrLabels = []string{"qmgr"} defaultSmtpLabels = []string{"smtp"} defaultSmtpdLabels = []string{"smtpd"} defaultBounceLabels = []string{"bounce"} defaultVirtualLabels = []string{"virtual"} ) // WithCleanupLabels is a function to apply user-defined service labels to PostfixExporter. 
func WithCleanupLabels(labels []string) ServiceLabel { return func(e *PostfixExporter) { e.cleanupLabels = labels } } // WithCleanupLabels is a function to apply user-defined service labels to PostfixExporter. func WithLmtpLabels(labels []string) ServiceLabel { return func(e *PostfixExporter) { e.lmtpLabels = labels } } // WithCleanupLabels is a function to apply user-defined service labels to PostfixExporter. func WithPipeLabels(labels []string) ServiceLabel { return func(e *PostfixExporter) { e.pipeLabels = labels } } // WithCleanupLabels is a function to apply user-defined service labels to PostfixExporter. func WithQmgrLabels(labels []string) ServiceLabel { return func(e *PostfixExporter) { e.qmgrLabels = labels } } // WithCleanupLabels is a function to apply user-defined service labels to PostfixExporter. func WithSmtpLabels(labels []string) ServiceLabel { return func(e *PostfixExporter) { e.smtpLabels = labels } } // WithCleanupLabels is a function to apply user-defined service labels to PostfixExporter. func WithSmtpdLabels(labels []string) ServiceLabel { return func(e *PostfixExporter) { e.smtpdLabels = labels } } // WithCleanupLabels is a function to apply user-defined service labels to PostfixExporter. func WithBounceLabels(labels []string) ServiceLabel { return func(e *PostfixExporter) { e.bounceLabels = labels } } // WithCleanupLabels is a function to apply user-defined service labels to PostfixExporter. 
func WithVirtualLabels(labels []string) ServiceLabel { return func(e *PostfixExporter) { e.virtualLabels = labels } } func (e *PostfixExporter) init() { timeBuckets := []float64{1e-3, 1e-2, 1e-1, 1.0, 10, 1 * 60, 1 * 60 * 60, 24 * 60 * 60, 2 * 24 * 60 * 60} e.once.Do(func() { constLabels := logsource.LogSourceDefaults{}.ConstLabels() if e.logSrc != nil { constLabels = e.logSrc.ConstLabels() } e.cleanupProcesses = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "cleanup_messages_processed_total", Help: "Total number of messages processed by cleanup.", ConstLabels: constLabels, }) e.cleanupRejects = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "cleanup_messages_rejected_total", Help: "Total number of messages rejected by cleanup.", ConstLabels: constLabels, }) e.cleanupNotAccepted = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "cleanup_messages_not_accepted_total", Help: "Total number of messages not accepted by cleanup.", ConstLabels: constLabels, }) e.lmtpDelays = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "postfix", Name: "lmtp_delivery_delay_seconds", Help: "LMTP message processing time in seconds.", Buckets: timeBuckets, ConstLabels: constLabels, }, []string{"stage"}) e.pipeDelays = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "postfix", Name: "pipe_delivery_delay_seconds", Help: "Pipe message processing time in seconds.", Buckets: timeBuckets, ConstLabels: constLabels, }, []string{"relay", "stage"}) // Metric name contains a typo, "receipients", metric is kept to ensure backwards compatibility e.qmgrInsertsNrcptLegacy = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "postfix", Name: "qmgr_messages_inserted_receipients", Help: "Legacy metric, please switch to postfix_qmgr_messages_inserted_recipients.", Buckets: []float64{1, 2, 4, 8, 16, 32, 64, 128}, ConstLabels: constLabels, }) e.qmgrInsertsNrcpt = 
prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "postfix", Name: "qmgr_messages_inserted_recipients", Help: "Number of recipients per message inserted into the mail queues.", Buckets: []float64{1, 2, 4, 8, 16, 32, 64, 128}, ConstLabels: constLabels, }) e.qmgrInsertsSize = prometheus.NewHistogram(prometheus.HistogramOpts{ Namespace: "postfix", Name: "qmgr_messages_inserted_size_bytes", Help: "Size of messages inserted into the mail queues in bytes.", Buckets: []float64{1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9}, ConstLabels: constLabels, }) e.qmgrRemoves = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "qmgr_messages_removed_total", Help: "Total number of messages removed from mail queues.", ConstLabels: constLabels, }) e.qmgrExpires = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "qmgr_messages_expired_total", Help: "Total number of messages expired from mail queues.", ConstLabels: constLabels, }) e.smtpDelays = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "postfix", Name: "smtp_delivery_delay_seconds", Help: "SMTP message processing time in seconds.", Buckets: timeBuckets, ConstLabels: constLabels, }, []string{"stage"}) e.smtpTLSConnects = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "postfix", Name: "smtp_tls_connections_total", Help: "Total number of outgoing TLS connections.", ConstLabels: constLabels, }, []string{"trust", "protocol", "cipher", "secret_bits", "algorithm_bits"}) e.smtpDeferreds = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "smtp_deferred_messages_total", Help: "Total number of messages that have been deferred on SMTP.", ConstLabels: constLabels, }) e.smtpProcesses = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "postfix", Name: "smtp_messages_processed_total", Help: "Total number of messages that have been processed by the smtp process.", ConstLabels: constLabels, }, []string{"status"}) e.smtpDeferredDSN = 
prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "postfix", Name: "smtp_deferred_messages_by_dsn_total", Help: "Total number of messages that have been deferred on SMTP by DSN.", ConstLabels: constLabels, }, []string{"dsn"}) e.smtpBouncedDSN = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "postfix", Name: "smtp_bounced_messages_by_dsn_total", Help: "Total number of messages that have been bounced on SMTP by DSN.", ConstLabels: constLabels, }, []string{"dsn"}) e.smtpConnectionTimedOut = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "smtp_connection_timed_out_total", Help: "Total number of messages that have been deferred on SMTP.", ConstLabels: constLabels, }) e.smtpdConnects = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "smtpd_connects_total", Help: "Total number of incoming connections.", ConstLabels: constLabels, }) e.smtpdDisconnects = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "smtpd_disconnects_total", Help: "Total number of incoming disconnections.", ConstLabels: constLabels, }) e.smtpdFCrDNSErrors = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "smtpd_forward_confirmed_reverse_dns_errors_total", Help: "Total number of connections for which forward-confirmed DNS cannot be resolved.", ConstLabels: constLabels, }) e.smtpdLostConnections = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "postfix", Name: "smtpd_connections_lost_total", Help: "Total number of connections lost.", ConstLabels: constLabels, }, []string{"after_stage"}) e.smtpdProcesses = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "postfix", Name: "smtpd_messages_processed_total", Help: "Total number of messages processed.", ConstLabels: constLabels, }, []string{"sasl_method"}) e.smtpdRejects = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "postfix", Name: "smtpd_messages_rejected_total", Help: "Total number of 
NOQUEUE rejects.", ConstLabels: constLabels, }, []string{"code"}) e.smtpdSASLAuthenticationFailures = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "smtpd_sasl_authentication_failures_total", Help: "Total number of SASL authentication failures.", ConstLabels: constLabels, }) e.smtpdTLSConnects = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "postfix", Name: "smtpd_tls_connections_total", Help: "Total number of incoming TLS connections.", ConstLabels: constLabels, }, []string{"trust", "protocol", "cipher", "secret_bits", "algorithm_bits"}) e.unsupportedLogEntries = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "postfix", Name: "unsupported_log_entries_total", Help: "Log entries that could not be processed.", ConstLabels: constLabels, }, []string{"service", "level"}) e.smtpStatusDeferred = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "smtp_status_deferred", Help: "Total number of messages deferred.", ConstLabels: constLabels, }) e.opendkimSignatureAdded = prometheus.NewCounterVec( // deprecated in a future release. 
prometheus.CounterOpts{ Namespace: "opendkim", Name: "signatures_added_total", Help: "Total number of messages signed.", ConstLabels: constLabels, }, []string{"subject", "domain"}, ) e.bounceNonDelivery = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "bounce_non_delivery_notification_total", Help: "Total number of non delivery notification sent by bounce.", ConstLabels: constLabels, }) e.virtualDelivered = prometheus.NewCounter(prometheus.CounterOpts{ Namespace: "postfix", Name: "virtual_delivered_total", Help: "Total number of mail delivered to a virtual mailbox.", ConstLabels: constLabels, }) e.postfixUp = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "postfix", Subsystem: "", Name: "up", Help: "Whether scraping Postfix's metrics was successful.", ConstLabels: constLabels, }, []string{"path"}, ) }) } // NewPostfixExporter creates a new Postfix exporter instance. func NewPostfixExporter(s *showq.Showq, logSrc logsource.LogSource, logUnsupportedLines bool, serviceLabels ...ServiceLabel) *PostfixExporter { postfixExporter := &PostfixExporter{ cleanupLabels: defaultCleanupLabels, lmtpLabels: defaultLmtpLabels, pipeLabels: defaultPipeLabels, qmgrLabels: defaultQmgrLabels, smtpLabels: defaultSmtpLabels, smtpdLabels: defaultSmtpdLabels, bounceLabels: defaultBounceLabels, virtualLabels: defaultVirtualLabels, logUnsupportedLines: logUnsupportedLines, showq: s, logSrc: logSrc, } for _, serviceLabel := range serviceLabels { serviceLabel(postfixExporter) } postfixExporter.init() return postfixExporter } // Describe the Prometheus metrics that are going to be exported. 
func (e *PostfixExporter) Describe(ch chan<- *prometheus.Desc) {
	// postfix_up is always described; the log-derived metrics are only
	// meaningful when a log source is configured.
	e.postfixUp.Describe(ch)
	if e.logSrc == nil {
		return
	}
	ch <- e.cleanupProcesses.Desc()
	ch <- e.cleanupRejects.Desc()
	ch <- e.cleanupNotAccepted.Desc()
	e.lmtpDelays.Describe(ch)
	e.pipeDelays.Describe(ch)
	ch <- e.qmgrInsertsNrcptLegacy.Desc()
	ch <- e.qmgrInsertsNrcpt.Desc()
	ch <- e.qmgrInsertsSize.Desc()
	ch <- e.qmgrRemoves.Desc()
	ch <- e.qmgrExpires.Desc()
	e.smtpDelays.Describe(ch)
	e.smtpTLSConnects.Describe(ch)
	ch <- e.smtpDeferreds.Desc()
	e.smtpProcesses.Describe(ch)
	e.smtpDeferredDSN.Describe(ch)
	e.smtpBouncedDSN.Describe(ch)
	ch <- e.smtpdConnects.Desc()
	ch <- e.smtpdDisconnects.Desc()
	ch <- e.smtpdFCrDNSErrors.Desc()
	e.smtpdLostConnections.Describe(ch)
	e.smtpdProcesses.Describe(ch)
	e.smtpdRejects.Describe(ch)
	ch <- e.smtpdSASLAuthenticationFailures.Desc()
	e.smtpdTLSConnects.Describe(ch)
	ch <- e.smtpStatusDeferred.Desc()
	e.unsupportedLogEntries.Describe(ch)
	e.smtpConnectionTimedOut.Describe(ch)
	e.opendkimSignatureAdded.Describe(ch)
	ch <- e.bounceNonDelivery.Desc()
	ch <- e.virtualDelivered.Desc()
}

// StartMetricCollection reads the configured log source until an error occurs,
// feeding every line into CollectFromLogLine. The postfix_up gauge for the
// source's path is held at 1 while reading succeeds and dropped to 0 on exit.
func (e *PostfixExporter) StartMetricCollection(ctx context.Context) {
	if e.logSrc == nil {
		return
	}
	gauge := e.postfixUp.WithLabelValues(e.logSrc.Path())
	defer gauge.Set(0)
	for {
		line, err := e.logSrc.Read(ctx)
		if err != nil {
			// Any error other than "no more entries" is fatal for this loop.
			if err != logsource.SystemdNoMoreEntries {
				slog.Error("Couldn't read log source.", "source", e.logSrc.Path(), "error", err.Error())
				return
			}
			// NOTE(review): on SystemdNoMoreEntries the (presumably empty)
			// line still falls through to CollectFromLogLine below, where it
			// would be counted as an unsupported entry — confirm whether this
			// iteration should skip instead.
		}
		e.CollectFromLogLine(line)
		gauge.Set(1)
	}
}

// Collect metrics from Postfix's showq and its log file.
func (e *PostfixExporter) Collect(ch chan<- prometheus.Metric) { if e.logSrc == nil { return } if e.showq != nil { err := e.showq.Collect(ch) postfixUpGauge := e.postfixUp.WithLabelValues(e.showq.Path()) if err == nil { postfixUpGauge.Set(1) } else { slog.Error("Failed to scrape showq", "error", err.Error()) postfixUpGauge.Set(0) } e.postfixUp.Collect(ch) } ch <- e.cleanupProcesses ch <- e.cleanupRejects ch <- e.cleanupNotAccepted e.lmtpDelays.Collect(ch) e.pipeDelays.Collect(ch) ch <- e.qmgrInsertsNrcptLegacy ch <- e.qmgrInsertsNrcpt ch <- e.qmgrInsertsSize ch <- e.qmgrRemoves ch <- e.qmgrExpires e.smtpDelays.Collect(ch) e.smtpTLSConnects.Collect(ch) ch <- e.smtpDeferreds e.smtpProcesses.Collect(ch) ch <- e.smtpdConnects ch <- e.smtpdDisconnects ch <- e.smtpdFCrDNSErrors e.smtpdLostConnections.Collect(ch) e.smtpdProcesses.Collect(ch) e.smtpDeferredDSN.Collect(ch) e.smtpBouncedDSN.Collect(ch) e.smtpdRejects.Collect(ch) ch <- e.smtpdSASLAuthenticationFailures e.smtpdTLSConnects.Collect(ch) ch <- e.smtpStatusDeferred e.unsupportedLogEntries.Collect(ch) ch <- e.smtpConnectionTimedOut e.opendkimSignatureAdded.Collect(ch) ch <- e.bounceNonDelivery ch <- e.virtualDelivered } prometheus-postfix-exporter-0.18.0/exporter/postfix_exporter_test.go000066400000000000000000000316501512764000200262170ustar00rootroot00000000000000package exporter import ( "testing" "github.com/prometheus/client_golang/prometheus" io_prometheus_client "github.com/prometheus/client_model/go" "github.com/stretchr/testify/assert" ) type testCounterMetric struct { Label []*io_prometheus_client.LabelPair CounterValue float64 } func stringPtr(s string) *string { return &s } type args struct { line []string unsupportedLogEntries []testCounterMetric removedCount int expiredCount int saslFailedCount int outgoingTLS int smtpdMessagesProcessed int smtpMessagesProcessed int smtpDeferred int smtpBounced int bounceNonDelivery int virtualDelivered int } type testCase struct { serviceLabels []ServiceLabel name 
string args args } func testPostfixExporter_CollectFromLogline(t *testing.T, tt testCase) { t.Helper() e := NewPostfixExporter(nil, nil, true, tt.serviceLabels...) for _, line := range tt.args.line { e.CollectFromLogLine(line) } assertCounterEquals(t, e.qmgrRemoves, tt.args.removedCount, "Wrong number of lines counted") assertCounterEquals(t, e.qmgrExpires, tt.args.expiredCount, "Wrong number of qmgr expired lines counted") assertCounterEquals(t, e.smtpdSASLAuthenticationFailures, tt.args.saslFailedCount, "Wrong number of Sasl counter counted") assertCounterEquals(t, e.smtpTLSConnects, tt.args.outgoingTLS, "Wrong number of TLS connections counted") assertCounterEquals(t, e.smtpdProcesses, tt.args.smtpdMessagesProcessed, "Wrong number of smtpd messages processed") assertCounterEquals(t, e.smtpProcesses, tt.args.smtpMessagesProcessed, "Wrong number of smtp messages processed") assertCounterEquals(t, e.smtpDeferredDSN, tt.args.smtpDeferred, "Wrong number of smtp deferred") assertCounterEquals(t, e.smtpBouncedDSN, tt.args.smtpBounced, "Wrong number of smtp bounced") assertCounterEquals(t, e.bounceNonDelivery, tt.args.bounceNonDelivery, "Wrong number of non delivery notifications") assertCounterEquals(t, e.virtualDelivered, tt.args.virtualDelivered, "Wrong number of delivered mails") assertCounterVecMetricsEquals(t, e.unsupportedLogEntries, tt.args.unsupportedLogEntries, "Wrong number of unsupportedLogEntries") } func TestPostfixExporter_CollectFromLogline(t *testing.T) { t.Parallel() tests := []testCase{ { name: "Single line", args: args{ line: []string{ "Feb 11 16:49:24 letterman postfix/qmgr[8204]: AAB4D259B1: removed", }, removedCount: 1, saslFailedCount: 0, }, }, { name: "Multiple lines", args: args{ line: []string{ "Feb 11 16:49:24 letterman postfix/qmgr[8204]: AAB4D259B1: removed", "Feb 11 16:49:24 letterman postfix/qmgr[8204]: C2032259E6: removed", "Feb 11 16:49:24 letterman postfix/qmgr[8204]: B83C4257DC: removed", "Feb 11 16:49:24 letterman postfix/qmgr[8204]: 
721BE256EA: removed", "Feb 11 16:49:25 letterman postfix/qmgr[8204]: CA94A259EB: removed", "Feb 11 16:49:25 letterman postfix/qmgr[8204]: AC1E3259E1: removed", "Feb 11 16:49:25 letterman postfix/qmgr[8204]: D114D221E3: removed", "Feb 11 16:49:25 letterman postfix/qmgr[8204]: A55F82104D: removed", "Feb 11 16:49:25 letterman postfix/qmgr[8204]: D6DAA259BC: removed", "Feb 11 16:49:25 letterman postfix/qmgr[8204]: E3908259F0: removed", "Feb 11 16:49:25 letterman postfix/qmgr[8204]: 0CBB8259BF: removed", "Feb 11 16:49:25 letterman postfix/qmgr[8204]: EA3AD259F2: removed", "Feb 11 16:49:25 letterman postfix/qmgr[8204]: DDEF824B48: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 289AF21DB9: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 6192B260E8: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: F2831259F4: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 09D60259F8: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 13A19259FA: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 2D42722065: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 746E325A0E: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 4D2F125A02: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: E30BC259EF: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: DC88924DA1: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 2164B259FD: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 8C30525A14: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: 8DCCE25A15: removed", "Feb 11 16:49:26 letterman postfix/qmgr[8204]: C5217255D5: removed", "Feb 11 16:49:27 letterman postfix/qmgr[8204]: D8EE625A28: removed", "Feb 11 16:49:27 letterman postfix/qmgr[8204]: 9AD7C25A19: removed", "Feb 11 16:49:27 letterman postfix/qmgr[8204]: D0EEE2596C: removed", "Feb 11 16:49:27 letterman postfix/qmgr[8204]: DFE732172E: removed", }, removedCount: 31, saslFailedCount: 0, }, }, { name: "qmgr expired", args: args{ line: []string{ "Apr 10 
14:50:16 mail postfix/qmgr[3663]: BACE842E72: from=, status=expired, returned to sender", "Apr 10 14:50:16 mail postfix/qmgr[3663]: BACE842E73: from=, status=force-expired, returned to sender", }, expiredCount: 2, }, }, { name: "SASL Failed", args: args{ line: []string{ "Apr 26 10:55:19 tcc1 postfix/smtpd[21126]: warning: SASL authentication failure: cannot connect to saslauthd server: Permission denied", "Apr 26 10:55:19 tcc1 postfix/smtpd[21126]: warning: SASL authentication failure: Password verification failed", "Apr 26 10:55:19 tcc1 postfix/smtpd[21126]: warning: laptop.local[192.168.1.2]: SASL PLAIN authentication failed: generic failure", }, saslFailedCount: 1, removedCount: 0, }, }, { name: "SASL login", args: args{ line: []string{ "Oct 30 13:19:26 mailgw-out1 postfix/smtpd[27530]: EB4B2C19E2: client=xxx[1.2.3.4], sasl_method=PLAIN, sasl_username=user@domain", "Feb 24 16:42:00 letterman postfix/smtpd[24906]: 1CF582025C: client=xxx[2.3.4.5]", }, removedCount: 0, saslFailedCount: 0, outgoingTLS: 0, smtpdMessagesProcessed: 2, }, }, { name: "Issue #35", args: args{ line: []string{ "Jul 24 04:38:17 mail postfix/smtp[30582]: Verified TLS connection established to gmail-smtp-in.l.google.com[108.177.14.26]:25: TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) key-exchange X25519 server-signature RSA-PSS (2048 bits) server-digest SHA256", "Jul 24 03:28:15 mail postfix/smtp[24052]: Verified TLS connection established to mx2.comcast.net[2001:558:fe21:2a::6]:25: TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)", }, removedCount: 0, saslFailedCount: 0, outgoingTLS: 2, smtpdMessagesProcessed: 0, }, }, { name: "Testing delays", args: args{ line: []string{ "Feb 24 16:18:40 letterman postfix/smtp[59649]: 5270320179: to=, relay=mail.telia.com[81.236.60.210]:25, delay=2017, delays=0.1/2017/0.03/0.05, dsn=2.0.0, status=sent (250 2.0.0 6FVIjIMwUJwU66FVIjAEB0 mail accepted for delivery)", }, removedCount: 0, saslFailedCount: 0, outgoingTLS: 0, 
smtpdMessagesProcessed: 0, smtpMessagesProcessed: 1, }, }, { name: "Testing different smtp statuses", args: args{ line: []string{ "Dec 29 02:54:09 mail postfix/smtp[7648]: 732BB407C3: host mail.domain.com[1.1.1.1] said: 451 DT:SPM 163 mx13,P8CowECpNVM_oEVaenoEAQ--.23796S3 1514512449, please try again 15min later (in reply to end of DATA command)", "Dec 29 02:54:12 mail postfix/smtp[7648]: 732BB407C3: to=, relay=mail.domain.com[1.1.1.1]:25, delay=6.2, delays=0.1/0/5.2/0.87, dsn=4.0.0, status=deferred (host mail.domain.com[1.1.1.1] said: 451 DT:SPM 163 mx40,WsCowAAnEhlCoEVa5GjcAA--.20089S3 1514512452, please try again 15min later (in reply to end of DATA command))", "Dec 29 03:03:48 mail postfix/smtp[8492]: 732BB407C3: to=, relay=mail.domain.com[1.1.1.1]:25, delay=582, delays=563/16/1.7/0.81, dsn=5.0.0, status=bounced (host mail.domain.com[1.1.1.1] said: 554 DT:SPM 163 mx9,O8CowEDJVFKCokVaRhz+AA--.26016S3 1514513028,please see http://mail.domain.com/help/help_spam.htm?ip= (in reply to end of DATA command))", "Dec 29 03:03:48 mail postfix/bounce[9321]: 732BB407C3: sender non-delivery notification: 5DE184083C", }, smtpMessagesProcessed: 2, smtpDeferred: 1, smtpBounced: 1, bounceNonDelivery: 1, }, }, { name: "Testing virtual delivered", args: args{ line: []string{ "Apr 7 15:35:20 123-mail postfix/virtual[20235]: 199041033BE: to=, relay=virtual, delay=0.08, delays=0.08/0/0/0, dsn=2.0.0, status=sent (delivered to maildir)", }, virtualDelivered: 1, }, }, { name: "Testing levels of unsupported entries", args: args{ line: []string{ "Feb 14 19:05:25 123-mail postfix/smtpd[1517]: table hash:/etc/postfix/virtual_mailbox_maps(0,lock|fold_fix) has changed -- restarting", "Mar 16 12:28:02 123-mail postfix/smtpd[16268]: fatal: file /etc/postfix/main.cf: parameter default_privs: unknown user name value: nobody", "Mar 16 23:30:44 123-mail postfix/qmgr[29980]: warning: please avoid flushing the whole queue when you have", "Mar 16 23:30:44 123-mail postfix/qmgr[29980]: warning: lots of 
deferred mail, that is bad for performance", }, unsupportedLogEntries: []testCounterMetric{ { Label: []*io_prometheus_client.LabelPair{ { Name: stringPtr("level"), Value: stringPtr(""), }, { Name: stringPtr("service"), Value: stringPtr("smtpd"), }, }, CounterValue: 1, }, { Label: []*io_prometheus_client.LabelPair{ { Name: stringPtr("level"), Value: stringPtr("fatal"), }, { Name: stringPtr("service"), Value: stringPtr("smtpd"), }, }, CounterValue: 1, }, { Label: []*io_prometheus_client.LabelPair{ { Name: stringPtr("level"), Value: stringPtr("warning"), }, { Name: stringPtr("service"), Value: stringPtr("qmgr"), }, }, CounterValue: 2, }, }, }, }, { name: "User-defined service labels", args: args{ line: []string{ "Feb 11 16:49:24 letterman postfix/relay/smtp[8204]: AAB4D259B1: to=, relay=example.com[127.0.0.1]:25, delay=0.1, delays=0.1/0/0/0, dsn=2.0.0, status=sent (250 2.0.0 Ok: queued as AAB4D259B1)", "Feb 11 16:49:24 letterman postfix/smtp[8204]: AAB4D259B1: to=, relay=example.com[127.0.0.1]:25, delay=0.1, delays=0.1/0/0/0, dsn=2.0.0, status=sent (250 2.0.0 Ok: queued as AAB4D259B1)", }, smtpMessagesProcessed: 1, unsupportedLogEntries: []testCounterMetric{ { Label: []*io_prometheus_client.LabelPair{ { Name: stringPtr("level"), Value: stringPtr(""), }, { Name: stringPtr("service"), Value: stringPtr("smtp"), }, }, CounterValue: 1, }, }, }, serviceLabels: []ServiceLabel{ WithSmtpLabels([]string{"relay/smtp"}), }, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() testPostfixExporter_CollectFromLogline(t, tt) }) } } func assertCounterEquals(t *testing.T, counter prometheus.Collector, expected int, message string) { if counter == nil || expected <= 0 { return } switch counter := counter.(type) { case *prometheus.CounterVec: metricsChan := make(chan prometheus.Metric) go func() { counter.Collect(metricsChan) close(metricsChan) }() var count = 0 for metric := range metricsChan { metricDto := io_prometheus_client.Metric{} _ = 
metric.Write(&metricDto) count += int(*metricDto.Counter.Value) } assert.Equal(t, expected, count, message) case prometheus.Counter: metricsChan := make(chan prometheus.Metric) go func() { counter.Collect(metricsChan) close(metricsChan) }() var count = 0 for metric := range metricsChan { metricDto := io_prometheus_client.Metric{} _ = metric.Write(&metricDto) count += int(*metricDto.Counter.Value) } assert.Equal(t, expected, count, message) default: t.Fatal("Type not implemented") } } func assertCounterVecMetricsEquals(t *testing.T, counter *prometheus.CounterVec, expected []testCounterMetric, message string) { if expected == nil { return } metricsChan := make(chan prometheus.Metric) go func() { counter.Collect(metricsChan) close(metricsChan) }() var res []testCounterMetric for metric := range metricsChan { metricDto := io_prometheus_client.Metric{} _ = metric.Write(&metricDto) cm := testCounterMetric{ Label: metricDto.Label, CounterValue: *metricDto.Counter.Value, } res = append(res, cm) } assert.ElementsMatch(t, expected, res, message) } prometheus-postfix-exporter-0.18.0/go.mod000066400000000000000000000112721512764000200204410ustar00rootroot00000000000000module github.com/hsn723/postfix_exporter go 1.25.0 require ( github.com/alecthomas/kingpin/v2 v2.4.0 github.com/coreos/go-systemd/v22 v22.6.0 github.com/docker/docker v28.5.2+incompatible github.com/nxadm/tail v1.4.11 github.com/prometheus/client_golang v1.23.2 github.com/prometheus/client_model v0.6.2 github.com/prometheus/common v0.67.5 github.com/prometheus/exporter-toolkit v0.15.1 github.com/stretchr/testify v1.11.1 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 k8s.io/api v0.35.0 k8s.io/apimachinery v0.35.0 k8s.io/client-go v0.35.0 k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 ) require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect 
github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.22.4 // indirect github.com/go-openapi/jsonreference v0.21.4 // indirect github.com/go-openapi/swag v0.25.4 // indirect github.com/go-openapi/swag/cmdutils v0.25.4 // indirect github.com/go-openapi/swag/conv v0.25.4 // indirect github.com/go-openapi/swag/fileutils v0.25.4 // indirect github.com/go-openapi/swag/jsonname v0.25.4 // indirect github.com/go-openapi/swag/jsonutils v0.25.4 // indirect github.com/go-openapi/swag/loading v0.25.4 // indirect github.com/go-openapi/swag/mangling v0.25.4 // indirect github.com/go-openapi/swag/netutils v0.25.4 // indirect github.com/go-openapi/swag/stringutils v0.25.4 // indirect github.com/go-openapi/swag/typeutils v0.25.4 // indirect github.com/go-openapi/swag/yamlutils v0.25.4 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/google/gnostic-models v0.7.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mdlayher/socket v0.5.1 // indirect github.com/mdlayher/vsock v1.2.1 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/atomicwriter v0.1.0 // indirect github.com/moby/term v0.5.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 
v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/morikuni/aec v1.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/procfs v0.19.2 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect go.opentelemetry.io/otel v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 // indirect go.opentelemetry.io/otel/metric v1.39.0 // indirect go.opentelemetry.io/otel/trace v1.39.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.46.0 // indirect golang.org/x/net v0.48.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/sync v0.19.0 // indirect golang.org/x/sys v0.39.0 // indirect golang.org/x/term v0.38.0 // indirect golang.org/x/text v0.32.0 // indirect golang.org/x/time v0.14.0 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) 
prometheus-postfix-exporter-0.18.0/go.sum000066400000000000000000000576441512764000200205030ustar00rootroot00000000000000github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY= github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod 
h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo= github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod 
h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4= github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80= github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8= github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4= github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU= github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ= github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4= github.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0= github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4= github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU= github.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y= github.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk= github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= github.com/go-openapi/swag/jsonutils v0.25.4 
h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA= github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY= github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo= github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM= github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s= github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE= github.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48= github.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg= github.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0= github.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg= github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8= github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0= github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw= github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE= github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw= github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc= github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4= github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg= github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= github.com/go-task/slim-sprig/v3 v3.0.0 
h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c= github.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos= github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ= github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= 
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ= github.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4= github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw= github.com/prometheus/exporter-toolkit v0.15.1 h1:XrGGr/qWl8Gd+pqJqTkNLww9eG8vR/CoRk0FubOKfLE= github.com/prometheus/exporter-toolkit v0.15.1/go.mod h1:P/NR9qFRGbCFgpklyhix9F6v6fFr/VQB/CVsrMDGKo4= github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify 
v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ= go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 h1:Ckwye2FpXkYgiHX7fyVrN1uA/UYd9ounqqTuSNAv0k4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0/go.mod h1:teIFJh5pW2y+AN7riv6IBPX2DuesS3HgP39mwOspKwU= go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= 
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= 
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 
h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E= sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= prometheus-postfix-exporter-0.18.0/logsource/000077500000000000000000000000001512764000200213325ustar00rootroot00000000000000prometheus-postfix-exporter-0.18.0/logsource/logsource.go000066400000000000000000000056121512764000200236670ustar00rootroot00000000000000package logsource import ( "context" "errors" "fmt" "io" "github.com/alecthomas/kingpin/v2" "github.com/prometheus/client_golang/prometheus" ) var ( SystemdNoMoreEntries = errors.New("No more journal entries") // nolint:staticcheck ) // A LogSourceFactory provides a repository of log sources that can be // instantiated from command line flags. type LogSourceFactory interface { // Init adds the factory's struct fields as flags in the // application. Init(*kingpin.Application) // New attempts to create a new log source. This is called after // flags have been parsed. Returning `nil, nil`, means the user // didn't want this log source. New(context.Context) ([]LogSourceCloser, error) // Watchdog indicates whether this log source is unhealthy. 
Watchdog(context.Context) bool requireEmbed() } type LogSourceFactoryDefaults struct{} func (LogSourceFactoryDefaults) Watchdog(context.Context) bool { return false } func (LogSourceFactoryDefaults) requireEmbed() {} type LogSourceCloser interface { io.Closer LogSource } // A LogSource is an interface to read log lines. type LogSource interface { // Path returns a representation of the log location. Path() string // Read returns the next log line. Returns `io.EOF` at the end of // the log. Read(context.Context) (string, error) ConstLabels() prometheus.Labels RemoteAddr() string requireEmbed() } type LogSourceDefaults struct{} func (LogSourceDefaults) ConstLabels() prometheus.Labels { return prometheus.Labels{} } func (LogSourceDefaults) RemoteAddr() string { return "localhost" } func (LogSourceDefaults) requireEmbed() {} var logSourceFactories []LogSourceFactory // RegisterLogSourceFactory can be called from module `init` functions // to register factories. func RegisterLogSourceFactory(lsf LogSourceFactory) { logSourceFactories = append(logSourceFactories, lsf) } // InitLogSourceFactories runs Init on all factories. The // initialization order is arbitrary, except `fileLogSourceFactory` is // always last (the fallback). The file log source must be last since // it's enabled by default. func InitLogSourceFactories(app *kingpin.Application) { RegisterLogSourceFactory(&fileLogSourceFactory{}) for _, f := range logSourceFactories { f.Init(app) } } // NewLogSourceFromFactories iterates through the factories and // attempts to instantiate a log source. The first factory to return // success wins. func NewLogSourceFromFactories(ctx context.Context) ([]LogSourceCloser, error) { for _, f := range logSourceFactories { src, err := f.New(ctx) if err != nil { return nil, err } if src != nil { return src, nil } } return nil, fmt.Errorf("no log source configured") } // IsWatchdogUnhealthy returns true if any of the log sources report // being unhealthy. 
func IsWatchdogUnhealthy(ctx context.Context) bool { for _, f := range logSourceFactories { if f.Watchdog(ctx) { return true } } return false } prometheus-postfix-exporter-0.18.0/logsource/logsource_docker.go000066400000000000000000000051521512764000200252150ustar00rootroot00000000000000//go:build !nodocker // +build !nodocker package logsource import ( "bufio" "context" "io" "log/slog" "strings" "github.com/alecthomas/kingpin/v2" "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" ) // A DockerLogSource reads log records from the given Docker // journal. type DockerLogSource struct { client DockerClient reader *bufio.Reader containerID string LogSourceDefaults } // A DockerClient is the client interface that client.Client // provides. See https://pkg.go.dev/github.com/docker/docker/client type DockerClient interface { io.Closer ContainerLogs(context.Context, string, container.LogsOptions) (io.ReadCloser, error) } // NewDockerLogSource returns a log source for reading Docker logs. func NewDockerLogSource(ctx context.Context, c DockerClient, containerID string) (*DockerLogSource, error) { r, err := c.ContainerLogs(ctx, containerID, container.LogsOptions{ ShowStdout: true, ShowStderr: true, Follow: true, Tail: "0", }) if err != nil { return nil, err } logSrc := &DockerLogSource{ client: c, containerID: containerID, reader: bufio.NewReader(r), } return logSrc, nil } func (s *DockerLogSource) Close() error { return s.client.Close() } func (s *DockerLogSource) Path() string { return "docker:" + s.containerID } func (s *DockerLogSource) Read(ctx context.Context) (string, error) { line, err := s.reader.ReadString('\n') if err != nil { return "", err } return strings.TrimSpace(line), nil } // A dockerLogSourceFactory is a factory that can create // DockerLogSources from command line flags. 
type dockerLogSourceFactory struct { LogSourceFactoryDefaults containerID string enable bool } func (f *dockerLogSourceFactory) Init(app *kingpin.Application) { app.Flag("docker.enable", "Read from Docker logs. Environment variable DOCKER_HOST can be used to change the address. See https://pkg.go.dev/github.com/docker/docker/client?tab=doc#NewEnvClient for more information.").Default("false").BoolVar(&f.enable) app.Flag("docker.container.id", "ID/name of the Postfix Docker container.").Default("postfix").StringVar(&f.containerID) } func (f *dockerLogSourceFactory) New(ctx context.Context) ([]LogSourceCloser, error) { if !f.enable { return nil, nil } slog.Info("Reading log events from Docker") c, err := client.NewClientWithOpts(client.FromEnv) if err != nil { return nil, err } logSource, err := NewDockerLogSource(ctx, c, f.containerID) if err != nil { return nil, err } return []LogSourceCloser{logSource}, nil } func init() { RegisterLogSourceFactory(&dockerLogSourceFactory{}) } prometheus-postfix-exporter-0.18.0/logsource/logsource_docker_test.go000066400000000000000000000036411512764000200262550ustar00rootroot00000000000000//go:build !nodocker // +build !nodocker package logsource import ( "context" "io" "strings" "testing" "github.com/docker/docker/api/types/container" "github.com/stretchr/testify/assert" ) func TestNewDockerLogSource(t *testing.T) { ctx := context.Background() c := &fakeDockerClient{} src, err := NewDockerLogSource(ctx, c, "acontainer") if err != nil { t.Fatalf("NewDockerLogSource failed: %v", err) } assert.Equal(t, []string{"acontainer"}, c.containerLogsCalls, "A call to ContainerLogs should be made.") if err := src.Close(); err != nil { t.Fatalf("Close failed: %v", err) } assert.Equal(t, 1, c.closeCalls, "A call to Close should be made.") } func TestDockerLogSource_Path(t *testing.T) { ctx := context.Background() c := &fakeDockerClient{} src, err := NewDockerLogSource(ctx, c, "acontainer") if err != nil { t.Fatalf("NewDockerLogSource failed: 
%v", err) } defer src.Close() assert.Equal(t, "docker:acontainer", src.Path(), "Path should be set by New.") } func TestDockerLogSource_Read(t *testing.T) { ctx := context.Background() c := &fakeDockerClient{ logsReader: io.NopCloser(strings.NewReader("Feb 13 23:31:30 ahost anid[123]: aline\n")), } src, err := NewDockerLogSource(ctx, c, "acontainer") if err != nil { t.Fatalf("NewDockerLogSource failed: %v", err) } defer src.Close() s, err := src.Read(ctx) if err != nil { t.Fatalf("Read failed: %v", err) } assert.Equal(t, "Feb 13 23:31:30 ahost anid[123]: aline", s, "Read should get data from the journal entry.") } type fakeDockerClient struct { logsReader io.ReadCloser containerLogsCalls []string closeCalls int } func (c *fakeDockerClient) ContainerLogs(ctx context.Context, containerID string, opts container.LogsOptions) (io.ReadCloser, error) { c.containerLogsCalls = append(c.containerLogsCalls, containerID) return c.logsReader, nil } func (c *fakeDockerClient) Close() error { c.closeCalls++ return nil } prometheus-postfix-exporter-0.18.0/logsource/logsource_file.go000066400000000000000000000066771512764000200247020ustar00rootroot00000000000000package logsource import ( "context" "errors" "io" "log/slog" "github.com/alecthomas/kingpin/v2" "github.com/nxadm/tail" "gopkg.in/tomb.v1" ) var defaultConfig = tail.Config{ ReOpen: true, // reopen the file if it's rotated MustExist: true, // fail immediately if the file is missing or has incorrect permissions Follow: true, // run in follow mode Poll: false, // poll for file changes instead of using inotify Location: &tail.SeekInfo{Whence: io.SeekEnd}, // seek to end of file Logger: tail.DiscardingLogger, } // A FileLogSource can read lines from a file. type FileLogSource struct { tailer *tail.Tail unhealthy bool LogSourceDefaults } // NewFileLogSource creates a new log source, tailing the given file. 
func NewFileLogSource(path string, config tail.Config) (*FileLogSource, error) { tailer, err := tail.TailFile(path, config) if err != nil { return nil, err } return &FileLogSource{tailer: tailer}, nil } func (s *FileLogSource) Close() error { go func() { // Stop() waits for the tailer goroutine to shut down, but it // can be blocking on sending on the Lines channel... for range s.tailer.Lines { } }() // Do not call .CleanUp() if the file should be tailed again // see also https://pkg.go.dev/github.com/nxadm/tail@v1.4.11#Tail.Cleanup return s.tailer.Stop() } func (s *FileLogSource) Path() string { return s.tailer.Filename } func (s *FileLogSource) Read(ctx context.Context) (string, error) { select { case line, ok := <-s.tailer.Lines: if !ok { s.unhealthy = true if tailErr := s.tailer.Tomb.Err(); tailErr != nil && !errors.Is(tailErr, tomb.ErrStillAlive) { return "", tailErr } return "", io.EOF } return line.Text, nil case <-ctx.Done(): return "", ctx.Err() } } // A fileLogSourceFactory is a factory that can create log sources // from command line flags. // // Because this factory is enabled by default, it must always be // registered last. type fileLogSourceFactory struct { LogSourceFactoryDefaults path string mustExist bool debug bool poll bool source *FileLogSource } func (f *fileLogSourceFactory) Init(app *kingpin.Application) { app.Flag("postfix.logfile_path", "Path where Postfix writes log entries.").Default("/var/log/mail.log").StringVar(&f.path) app.Flag("postfix.logfile_must_exist", "Fail if the log file doesn't exist.").Default("true").BoolVar(&f.mustExist) app.Flag("postfix.logfile_debug", "Enable debug logging for the log file.").Default("false").BoolVar(&f.debug) app.Flag("postfix.logfile_poll", "Poll for file changes instead of using inotify.").Default("false").BoolVar(&f.poll) } // config returns a tail.Config configured from the factory's fields. 
// config returns a tail.Config derived from defaultConfig with the
// factory's flag-controlled fields (MustExist, Poll, debug logger) applied.
func (f fileLogSourceFactory) config() tail.Config {
	conf := defaultConfig
	conf.MustExist = f.mustExist
	conf.Poll = f.poll
	if f.debug {
		// Debug mode routes the tailer's internal logging to its default logger
		// instead of discarding it.
		conf.Logger = tail.DefaultLogger
	}
	return conf
}

// New creates a single file-backed log source from the configured path.
// It returns (nil, nil) when no path is configured, which tells the caller
// this factory is not in use.
func (f *fileLogSourceFactory) New(ctx context.Context) ([]LogSourceCloser, error) {
	if f.path == "" {
		return nil, nil
	}
	slog.Info("Reading log events from file", "path", f.path)
	logSource, err := NewFileLogSource(f.path, f.config())
	if err != nil {
		return nil, err
	}
	// Keep a reference so Watchdog can report the source's health.
	f.source = logSource
	return []LogSourceCloser{logSource}, nil
}

// Watchdog reports whether the created log source has become unhealthy
// (its tailer died). False when no source has been created yet.
func (f *fileLogSourceFactory) Watchdog(ctx context.Context) bool {
	if f.source == nil {
		return false
	}
	return f.source.unhealthy
}
prometheus-postfix-exporter-0.18.0/logsource/logsource_file_test.go000066400000000000000000000032571512764000200257300ustar00rootroot00000000000000
package logsource

import (
	"context"
	"fmt"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// TestFileLogSource_Path checks that Path() reports the tailed file's name.
func TestFileLogSource_Path(t *testing.T) {
	path, close, err := setupFakeLogFile()
	if err != nil {
		t.Fatalf("setupFakeTailer failed: %v", err)
	}
	defer close()
	src, err := NewFileLogSource(path, defaultConfig)
	if err != nil {
		t.Fatalf("NewFileLogSource failed: %v", err)
	}
	defer src.Close()
	assert.Equal(t, path, src.Path(), "Path should be set by New.")
}

// TestFileLogSource_Read checks that Read() returns a line appended to the
// tailed file by the background writer in setupFakeLogFile.
func TestFileLogSource_Read(t *testing.T) {
	ctx := context.Background()
	path, close, err := setupFakeLogFile()
	if err != nil {
		t.Fatalf("setupFakeTailer failed: %v", err)
	}
	defer close()
	src, err := NewFileLogSource(path, defaultConfig)
	if err != nil {
		t.Fatalf("NewFileLogSource failed: %v", err)
	}
	defer src.Close()
	s, err := src.Read(ctx)
	if err != nil {
		t.Fatalf("Read failed: %v", err)
	}
	assert.Equal(t, "Feb 13 23:31:30 ahost anid[123]: aline", s, "Read should get data from the journal entry.")
}

// setupFakeLogFile creates a temp file and a goroutine that keeps appending a
// fixed log line to it every 10ms. It returns the file path and a cleanup
// function that stops the writer, waits for it, and removes the file.
func setupFakeLogFile() (string, func(), error) {
	f, err := os.CreateTemp("", "filelogsource")
	if err != nil {
		return "", nil, err
	}
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer os.Remove(f.Name())
		defer f.Close()
		for {
			// The tailer seeks to the end and then does a
			// follow. Keep writing lines so we know it wakes up and
			// returns lines.
			fmt.Fprintln(f, "Feb 13 23:31:30 ahost anid[123]: aline")
			select {
			case <-time.After(10 * time.Millisecond):
				// continue
			case <-ctx.Done():
				return
			}
		}
	}()
	return f.Name(), func() {
		cancel()
		wg.Wait()
	}, nil
}
prometheus-postfix-exporter-0.18.0/logsource/logsource_kubernetes.go000066400000000000000000000340271512764000200261200ustar00rootroot00000000000000
//go:build !nokubernetes
// +build !nokubernetes

package logsource

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"log/slog"
	"os"
	"slices"
	"sync"
	"time"

	"github.com/alecthomas/kingpin/v2"
	"github.com/prometheus/client_golang/prometheus"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/utils/ptr"
)

// A KubernetesLogSource can read lines from Kubernetes pod logs.
type KubernetesLogSource struct {
	LogSourceDefaults
	clientset     *kubernetes.Clientset // API client used to (re)open log streams
	logStream     containerLogStream    // current follow-mode log stream, may be empty until first Read
	namespace     string
	serviceName   string
	podName       string
	containerName string
}

// containerLogStream represents a log stream from a specific container.
type containerLogStream struct {
	stream  io.ReadCloser
	scanner *bufio.Scanner // line-oriented view over stream
}

// NewKubernetesLogSource creates a new log source that reads from Kubernetes pod logs.
func NewKubernetesLogSource(ctx context.Context, namespace, serviceName, containerName string, pod corev1.Pod, clientset *kubernetes.Clientset) []*KubernetesLogSource { containers := pod.Spec.Containers if containerName != "" { for _, container := range pod.Spec.Containers { if container.Name == containerName { containers = []corev1.Container{container} break } } } var logSources []*KubernetesLogSource for _, container := range containers { logSource := createKubernetesLogSource(ctx, namespace, serviceName, container.Name, pod, clientset) logSources = append(logSources, logSource) } return logSources } func createKubernetesLogSource(ctx context.Context, namespace, serviceName, containerName string, pod corev1.Pod, clientset *kubernetes.Clientset) *KubernetesLogSource { cls, err := getContainerLogStream(ctx, namespace, pod.Name, containerName, clientset) // Even if we fail to get the log stream now, we can retry later in Read(). if err != nil { slog.Error("Failed to get log stream for pod container", "pod", pod.Name, "container", containerName, "error", err) } return &KubernetesLogSource{ clientset: clientset, namespace: namespace, logStream: cls, podName: pod.Name, containerName: containerName, serviceName: serviceName, } } func getContainerLogStream(ctx context.Context, namespace, podName, containerName string, clientset *kubernetes.Clientset) (containerLogStream, error) { logOptions := &corev1.PodLogOptions{ Follow: true, TailLines: ptr.To(int64(10)), // Start with last 10 lines Container: containerName, } // Wait for the pod to start for { pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { continue } if pod.Status.Phase == corev1.PodRunning { break } slog.Info("Waiting for pod to be running", "pod", podName, "phase", pod.Status.Phase) time.Sleep(5 * time.Second) } // Create log stream req := clientset.CoreV1().Pods(namespace).GetLogs(podName, logOptions) logStream, err := req.Stream(ctx) if err != nil { return 
containerLogStream{}, fmt.Errorf("failed to create log stream for pod %s container %s: %v", podName, containerName, err) } slog.Info("Started log stream for pod container", "pod", podName, "container", containerName) return containerLogStream{ stream: logStream, scanner: bufio.NewScanner(logStream), }, nil } func createClientset(kubeconfigPath string) (*kubernetes.Clientset, bool, error) { var config *rest.Config var err error var inCluster bool // Try in-cluster config first (when running inside Kubernetes) config, err = rest.InClusterConfig() if err != nil { inCluster = false // If in-cluster config fails, try to use local kubeconfig for development slog.Debug("Failed to get in-cluster config, trying local kubeconfig", "error", err) // Use provided kubeconfig path or default location config, err = clientcmd.BuildConfigFromFlags("", kubeconfigPath) if err != nil { return nil, inCluster, fmt.Errorf("failed to create kubernetes config from kubeconfig: %v", err) } } else { inCluster = true } // Create the clientset clientset, err := kubernetes.NewForConfig(config) if err != nil { return nil, inCluster, fmt.Errorf("failed to create kubernetes client: %v", err) } return clientset, inCluster, nil } func determineNamespace(ns string, inCluster bool) string { if ns != "" { return ns } // Default to "default" namespace if none specified. ns = "default" if !inCluster { return ns } // When running in-cluster, try to read the current namespace. 
if namespaceBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { ns = string(namespaceBytes) } return ns } func getLogTargets(ctx context.Context, clientset *kubernetes.Clientset, namespace, serviceName, podName string) ([]corev1.Pod, error) { var pods []corev1.Pod var err error if podName != "" { pods, err = getLogTargetsFromPodName(ctx, clientset, namespace, podName) } else if serviceName != "" { pods, err = getLogTargetsFromService(ctx, clientset, namespace, serviceName) } if err != nil { return nil, err } if len(pods) == 0 { return nil, fmt.Errorf("no pods found") } return pods, nil } func getLogTargetsFromPodName(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName string) ([]corev1.Pod, error) { pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get pod %s in namespace %s: %v", podName, namespace, err) } return []corev1.Pod{*pod}, nil } func getLogTargetsFromService(ctx context.Context, clientset *kubernetes.Clientset, namespace, serviceName string) ([]corev1.Pod, error) { svc, err := clientset.CoreV1().Services(namespace).Get(ctx, serviceName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("failed to get service %s in namespace %s: %v", serviceName, namespace, err) } if len(svc.Spec.Selector) == 0 { return nil, fmt.Errorf("service %s in namespace %s has no selector", serviceName, namespace) } selector := labels.Set(svc.Spec.Selector).AsSelector().String() pods, err := getLogTargetsFromLabelSelector(ctx, clientset, namespace, selector) if err != nil { return nil, err } for { if len(pods) == 0 { continue } areReplicasReady, err := areReplicasReady(ctx, clientset, pods[0]) if err != nil { return nil, err } if areReplicasReady { break } slog.Info("Waiting for pods of service to be ready", "service", serviceName) time.Sleep(5 * time.Second) } return pods, nil } func getLogTargetsFromLabelSelector(ctx 
context.Context, clientset *kubernetes.Clientset, namespace, labelSelector string) ([]corev1.Pod, error) { podList, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ LabelSelector: labelSelector, }) if err != nil { return nil, fmt.Errorf("failed to list pods with label selector %s in namespace %s: %v", labelSelector, namespace, err) } return podList.Items, nil } func areReplicasReady(ctx context.Context, clientset *kubernetes.Clientset, pod corev1.Pod) (bool, error) { for _, owner := range pod.OwnerReferences { switch owner.Kind { case "ReplicaSet": return isReplicaSetReady(ctx, clientset, owner.Name, pod.Namespace) case "StatefulSet": return isStatefulSetReady(ctx, clientset, owner.Name, pod.Namespace) case "DaemonSet": return isDaemonSetReady(ctx, clientset, owner.Name, pod.Namespace) default: return true, nil // Not a controller we care about } } // sanity return true, nil } func isReplicaSetReady(ctx context.Context, clientset *kubernetes.Clientset, name, namespace string) (bool, error) { rs, err := clientset.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get ReplicaSet %s in namespace %s: %v", name, namespace, err) } return rs.Status.ReadyReplicas == *rs.Spec.Replicas, nil } func isStatefulSetReady(ctx context.Context, clientset *kubernetes.Clientset, name, namespace string) (bool, error) { ss, err := clientset.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get StatefulSet %s in namespace %s: %v", name, namespace, err) } return ss.Status.ReadyReplicas == *ss.Spec.Replicas, nil } func isDaemonSetReady(ctx context.Context, clientset *kubernetes.Clientset, name, namespace string) (bool, error) { ds, err := clientset.AppsV1().DaemonSets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get DaemonSet %s in namespace %s: %v", name, namespace, err) } 
return ds.Status.NumberReady == ds.Status.DesiredNumberScheduled, nil } func (s *KubernetesLogSource) initializeLogStream(ctx context.Context) error { if s.logStream.stream != nil { return nil // Already initialized } newStream, err := getContainerLogStream(ctx, s.namespace, s.podName, s.containerName, s.clientset) if err != nil { return fmt.Errorf("failed to initialize log stream for pod %s container %s: %v", s.podName, s.containerName, err) } s.logStream = newStream return nil } func (s *KubernetesLogSource) Close() error { if s.logStream.stream == nil { return nil } return s.logStream.stream.Close() } func (s *KubernetesLogSource) Path() string { namespace := s.namespace if namespace == "" { namespace = "default" } return fmt.Sprintf("kubernetes://%s/%s/%s", namespace, s.podName, s.containerName) } func (s *KubernetesLogSource) Read(ctx context.Context) (string, error) { // Ensure log stream is initialized if err := s.initializeLogStream(ctx); err != nil { return "", err } select { case <-ctx.Done(): return "", ctx.Err() default: if s.logStream.scanner.Scan() { line := s.logStream.scanner.Text() return line, nil } // The pod might have restarted or the stream might have been closed. // The stream will be re-initialized on the next Read() call. s.logStream.stream.Close() s.logStream.stream = nil s.logStream.scanner = nil return "", nil } } func (s *KubernetesLogSource) ConstLabels() prometheus.Labels { labels := prometheus.Labels{} if s.namespace != "" { labels["namespace"] = s.namespace } if s.podName != "" { labels["pod"] = s.podName } if s.containerName != "" { labels["container"] = s.containerName } return labels } func (s *KubernetesLogSource) RemoteAddr() string { return fmt.Sprintf("%s.%s.%s.svc.cluster.local", s.podName, s.serviceName, s.namespace) } // kubernetesLogSourceFactory is a factory that can create Kubernetes log sources // from command line flags. 
type kubernetesLogSourceFactory struct { LogSourceFactoryDefaults clientset *kubernetes.Clientset namespace string podName string serviceName string containerName string kubeconfigPath string watchedPods []string enable bool } func (f *kubernetesLogSourceFactory) Init(app *kingpin.Application) { app.Flag("kubernetes.enable", "Read from Kubernetes pod logs instead of log").Default("false").BoolVar(&f.enable) app.Flag("kubernetes.namespace", "Kubernetes namespace to read logs from (optional, defaults to current namespace when in-cluster or 'default').").Envar("KUBERNETES_NAMESPACE").StringVar(&f.namespace) app.Flag("kubernetes.pod-name", "Specific pod name to read logs from (alternative to label-selector).").Envar("KUBERNETES_POD_NAME").StringVar(&f.podName) app.Flag("kubernetes.service", "Name of the service selecting the postfix pods").Envar("KUBERNETES_SERVICE").StringVar(&f.serviceName) app.Flag("kubernetes.container", "Container name to read logs from (optional, reads from all containers if not specified).").Envar("KUBERNETES_CONTAINER").StringVar(&f.containerName) app.Flag("kubernetes.kubeconfig", "Path to kubeconfig file").Envar("KUBERNETES_KUBECONFIG").Default("~/.kube/config").StringVar(&f.kubeconfigPath) } func (f *kubernetesLogSourceFactory) New(ctx context.Context) ([]LogSourceCloser, error) { if !f.enable { return nil, nil } // Must specify either pod name or label selector (but not both) if f.podName == "" && f.serviceName == "" { return nil, nil // Not configured } if f.podName != "" && f.serviceName != "" { return nil, fmt.Errorf("cannot specify both pod name and label selector, choose one") } // Create the clientset clientset, inCluster, err := createClientset(f.kubeconfigPath) if err != nil { return nil, fmt.Errorf("failed to create kubernetes client: %v", err) } f.clientset = clientset namespace := determineNamespace(f.namespace, inCluster) f.namespace = namespace slog.Info("Using namespace", "namespace", namespace, "in-cluster", inCluster) pods, 
err := getLogTargets(ctx, clientset, namespace, f.serviceName, f.podName) if err != nil { return nil, err } slog.Info("Found pods to read logs from", "count", len(pods)) var logSources []LogSourceCloser var logSourcesChan = make(chan LogSourceCloser) var wg sync.WaitGroup wg.Add(len(pods)) watchedPods := make([]string, 0, len(pods)) for _, pod := range pods { watchedPods = append(watchedPods, pod.Name) go func(pod corev1.Pod) { defer wg.Done() srcs := NewKubernetesLogSource(ctx, namespace, f.serviceName, f.containerName, pod, clientset) for _, src := range srcs { logSourcesChan <- src } }(pod) } slices.Sort(watchedPods) f.watchedPods = watchedPods go func() { wg.Wait() close(logSourcesChan) }() for src := range logSourcesChan { logSources = append(logSources, src) } return logSources, nil } func (f *kubernetesLogSourceFactory) Watchdog(ctx context.Context) bool { if !f.enable || f.clientset == nil { return false } pods, err := getLogTargets(ctx, f.clientset, f.namespace, f.serviceName, f.podName) if err != nil { slog.Error("Kubernetes watchdog: failed to get log targets", "error", err) // do not restart exporter if we cannot get log targets as this might be a transient error return false } var currentPodNames []string for _, pod := range pods { currentPodNames = append(currentPodNames, pod.Name) } slices.Sort(currentPodNames) return !slices.Equal(f.watchedPods, currentPodNames) } func init() { RegisterLogSourceFactory(&kubernetesLogSourceFactory{}) } prometheus-postfix-exporter-0.18.0/logsource/logsource_kubernetes_test.go000066400000000000000000000047001512764000200271520ustar00rootroot00000000000000package logsource import ( "context" "os" "testing" "github.com/alecthomas/kingpin/v2" "github.com/stretchr/testify/assert" ) func TestKubernetesLogSourceFactory_Init(t *testing.T) { app := kingpin.New("test", "test") factory := &kubernetesLogSourceFactory{enable: true} factory.Init(app) // Parse some test flags args := []string{ "--kubernetes.enable", 
"--kubernetes.namespace", "default", "--kubernetes.service", "postfix-svc", "--kubernetes.container", "postfix", "--kubernetes.kubeconfig", "/path/to/kubeconfig", } _, err := app.Parse(args) assert.NoError(t, err) assert.True(t, factory.enable) assert.Equal(t, "default", factory.namespace) assert.Equal(t, "postfix-svc", factory.serviceName) assert.Equal(t, "postfix", factory.containerName) assert.Equal(t, "/path/to/kubeconfig", factory.kubeconfigPath) } func TestKubernetesLogSourceFactory_New_NoConfig(t *testing.T) { ctx := context.Background() factory := &kubernetesLogSourceFactory{enable: true} // Should return nil when not configured src, err := factory.New(ctx) assert.NoError(t, err) assert.Nil(t, src) } func TestKubernetesLogSource_Path(t *testing.T) { source := &KubernetesLogSource{ namespace: "test-namespace", podName: "test-pod", containerName: "test-container", } expected := "kubernetes://test-namespace/test-pod/test-container" assert.Equal(t, expected, source.Path()) } func TestKubernetesLogSourceFactory_EnvironmentVariables(t *testing.T) { // Set environment variables os.Setenv("KUBERNETES_POD_NAME", "test-pod-from-env") os.Setenv("KUBERNETES_NAMESPACE", "test-ns-from-env") os.Setenv("KUBERNETES_SERVICE", "test-postfix-svc") os.Setenv("KUBERNETES_CONTAINER", "test-container-from-env") defer func() { // Clean up environment variables os.Unsetenv("KUBERNETES_POD_NAME") os.Unsetenv("KUBERNETES_NAMESPACE") os.Unsetenv("KUBERNETES_SERVICE") os.Unsetenv("KUBERNETES_CONTAINER") }() // Create a new kingpin app and factory app := kingpin.New("test", "test") factory := &kubernetesLogSourceFactory{enable: true} factory.Init(app) // Parse empty args (should use environment variables) _, err := app.Parse([]string{}) assert.NoError(t, err) // Verify environment variables were used assert.Equal(t, "test-pod-from-env", factory.podName) assert.Equal(t, "test-ns-from-env", factory.namespace) assert.Equal(t, "test-postfix-svc", factory.serviceName) assert.Equal(t, 
"test-container-from-env", factory.containerName) } prometheus-postfix-exporter-0.18.0/logsource/logsource_systemd.go000066400000000000000000000112521512764000200254340ustar00rootroot00000000000000//go:build !nosystemd && linux // +build !nosystemd,linux package logsource import ( "context" "fmt" "io" "log/slog" "time" "github.com/alecthomas/kingpin/v2" "github.com/coreos/go-systemd/v22/sdjournal" ) // timeNow is a test fake injection point. var timeNow = time.Now // A SystemdLogSource reads log records from the given Systemd // journal. type SystemdLogSource struct { journal SystemdJournal path string LogSourceDefaults } // A SystemdJournal is the journal interface that sdjournal.Journal // provides. See https://pkg.go.dev/github.com/coreos/go-systemd/sdjournal?tab=doc type SystemdJournal interface { io.Closer AddMatch(match string) error GetEntry() (*sdjournal.JournalEntry, error) Next() (uint64, error) SeekTail() error PreviousSkip(skip uint64) (uint64, error) Wait(timeout time.Duration) int } // NewSystemdLogSource returns a log source for reading Systemd // journal entries. `unit` and `slice` provide filtering if non-empty // (with `slice` taking precedence). 
func NewSystemdLogSource(j SystemdJournal, path, unit, slice string) (*SystemdLogSource, error) {
	logSrc := &SystemdLogSource{journal: j, path: path}
	var err error
	// Slice takes precedence over unit; if neither is set, no match is
	// added and the whole journal is read.
	if slice != "" {
		err = logSrc.journal.AddMatch("_SYSTEMD_SLICE=" + slice)
	} else if unit != "" {
		err = logSrc.journal.AddMatch("_SYSTEMD_UNIT=" + unit)
	}
	if err != nil {
		// Ownership of j was taken; close it on any constructor failure.
		logSrc.journal.Close()
		return nil, err
	}
	// Start at end of journal
	if err := logSrc.journal.SeekTail(); err != nil {
		logSrc.journal.Close()
		return nil, err
	}
	return logSrc, nil
}

// Close closes the underlying journal handle.
func (s *SystemdLogSource) Close() error {
	return s.journal.Close()
}

// Path returns the string representation of the journal this source reads.
func (s *SystemdLogSource) Path() string {
	return s.path
}

// Read waits up to 10 seconds for new journal activity and returns the next
// matching entry formatted as a syslog-style line
// ("Mon _2 15:04:05 host ident[pid]: message"). It returns
// SystemdNoMoreEntries when the wait times out or no new entry matches.
func (s *SystemdLogSource) Read(ctx context.Context) (string, error) {
	// wait for any changes in any journal file
	r := s.journal.Wait(10 * time.Second) // max wait 10 seconds
	if r < 0 {
		// Negative return is an sd_journal error; the handle is closed here,
		// so subsequent Reads on this source are not expected to succeed.
		s.journal.Close()
		return "", fmt.Errorf("sd_journal.wait returned %d", r)
	}
	if r == sdjournal.SD_JOURNAL_INVALIDATE {
		// the first wait call seems to initialize the watch and results always in INVALIDATE
		// seek again to the end of the journal
		if err := s.journal.SeekTail(); err != nil {
			return "", err
		}
		// go back to the last entry, so that next() will advance the pointer to the new entry
		_, err := s.journal.PreviousSkip(1)
		if err != nil {
			return "", err
		}
	} else if r == sdjournal.SD_JOURNAL_NOP {
		// wait timed out without any changes in the journal
		return "", SystemdNoMoreEntries
	}
	c, err := s.journal.Next()
	if err != nil {
		return "", err
	}
	if c == 0 {
		// we might get triggered by journal changes, which are unrelated to our matches (unit)
		// in that case, we are still at the end of the journal, but no new entry has been added for us
		return "", SystemdNoMoreEntries
	}
	e, err := s.journal.GetEntry()
	if err != nil {
		return "", err
	}
	// RealtimeTimestamp is in microseconds since the epoch.
	ts := time.Unix(0, int64(e.RealtimeTimestamp)*int64(time.Microsecond))
	entry := fmt.Sprintf(
		"%s %s %s[%s]: %s",
		ts.Format(time.Stamp),
		e.Fields["_HOSTNAME"],
		e.Fields["SYSLOG_IDENTIFIER"],
		e.Fields["_PID"],
		e.Fields["MESSAGE"],
	)
	slog.Debug("Found entry", "entry", entry)
	return entry, nil
}

// A systemdLogSourceFactory is a factory that can create
// SystemdLogSources from command line flags.
type systemdLogSourceFactory struct {
	LogSourceFactoryDefaults
	// unit/slice filter the journal; path points at a journal directory
	// (empty means the local journald).
	unit, slice, path string
	enable            bool
}

// Init registers the systemd-related command line flags on app.
func (f *systemdLogSourceFactory) Init(app *kingpin.Application) {
	app.Flag("systemd.enable", "Read from the systemd journal instead of log").Default("false").BoolVar(&f.enable)
	app.Flag("systemd.unit", "Name of the Postfix systemd unit.").Default("postfix.service").StringVar(&f.unit)
	app.Flag("systemd.slice", "Name of the Postfix systemd slice. Overrides the systemd unit.").Default("").StringVar(&f.slice)
	app.Flag("systemd.journal_path", "Path to the systemd journal").Default("").StringVar(&f.path)
}

// New creates a single systemd-backed log source when --systemd.enable is
// set; otherwise it returns (nil, nil) to signal the factory is unused.
func (f *systemdLogSourceFactory) New(ctx context.Context) ([]LogSourceCloser, error) {
	if !f.enable {
		return nil, nil
	}
	slog.Info("Reading log events from systemd")
	j, path, err := newSystemdJournal(f.path)
	if err != nil {
		return nil, err
	}
	source, err := NewSystemdLogSource(j, path, f.unit, f.slice)
	if err != nil {
		return nil, err
	}
	return []LogSourceCloser{source}, nil
}

// newSystemdJournal creates a journal handle. It returns the handle
// and a string representation of it. If `path` is empty, it connects
// to the local journald.
func newSystemdJournal(path string) (*sdjournal.Journal, string, error) { if path != "" { j, err := sdjournal.NewJournalFromDir(path) return j, path, err } j, err := sdjournal.NewJournal() return j, "journald", err } func init() { RegisterLogSourceFactory(&systemdLogSourceFactory{}) } prometheus-postfix-exporter-0.18.0/logsource/logsource_systemd_test.go000066400000000000000000000101461512764000200264740ustar00rootroot00000000000000//go:build !nosystemd && linux // +build !nosystemd,linux package logsource import ( "context" "os" "testing" "time" "github.com/coreos/go-systemd/v22/sdjournal" "github.com/stretchr/testify/assert" ) func TestNewSystemdLogSource(t *testing.T) { j := &fakeSystemdJournal{} src, err := NewSystemdLogSource(j, "apath", "aunit", "aslice") if err != nil { t.Fatalf("NewSystemdLogSource failed: %v", err) } assert.Equal(t, []string{"_SYSTEMD_SLICE=aslice"}, j.addMatchCalls, "A match should be added for slice.") assert.Equal(t, 1, j.seekTailCalls, "A call to SeekTail should be made.") assert.Equal(t, 0, len(j.waitCalls), "No call to Wait should be made yet.") if err := src.Close(); err != nil { t.Fatalf("Close failed: %v", err) } assert.Equal(t, 1, j.closeCalls, "A call to Close should be made.") } func TestSystemdLogSource_Path(t *testing.T) { j := &fakeSystemdJournal{} src, err := NewSystemdLogSource(j, "apath", "aunit", "aslice") if err != nil { t.Fatalf("NewSystemdLogSource failed: %v", err) } defer src.Close() assert.Equal(t, "apath", src.Path(), "Path should be set by New.") } func TestSystemdLogSource_Read(t *testing.T) { ctx := context.Background() j := &fakeSystemdJournal{ getEntryValues: []sdjournal.JournalEntry{ { Fields: map[string]string{ "_HOSTNAME": "ahost", "SYSLOG_IDENTIFIER": "anid", "_PID": "123", "MESSAGE": "aline", }, RealtimeTimestamp: 1234567890000000, }, }, nextValues: []uint64{1}, } src, err := NewSystemdLogSource(j, "apath", "aunit", "aslice") if err != nil { t.Fatalf("NewSystemdLogSource failed: %v", err) } defer 
src.Close() s, err := src.Read(ctx) if err != nil { t.Fatalf("Read failed: %v", err) } assert.Equal(t, []time.Duration{10 * time.Second}, j.waitCalls, "A Wait call should be made") assert.Equal(t, 2, j.seekTailCalls, "Two seekTail calls expected") assert.Equal(t, []uint64{1}, j.previousSkipCalls, "One previousSkipCall expected.") assert.Equal(t, "Feb 13 23:31:30 ahost anid[123]: aline", s, "Read should get data from the journal entry.") } func TestSystemdLogSource_ReadEOF(t *testing.T) { ctx := context.Background() j := &fakeSystemdJournal{ nextValues: []uint64{0}, } src, err := NewSystemdLogSource(j, "apath", "aunit", "aslice") if err != nil { t.Fatalf("NewSystemdLogSource failed: %v", err) } defer src.Close() _, err = src.Read(ctx) assert.Equal(t, SystemdNoMoreEntries, err, "Should interpret Next 0 as no more entries.") } func TestMain(m *testing.M) { // We compare Unix timestamps to date strings, so make it deterministic. os.Setenv("TZ", "UTC") timeNow = func() time.Time { return time.Date(2009, 2, 13, 23, 31, 30, 0, time.UTC) } defer func() { timeNow = time.Now }() os.Exit(m.Run()) } type fakeSystemdJournal struct { getEntryError error nextError error getEntryValues []sdjournal.JournalEntry nextValues []uint64 addMatchCalls []string previousSkipCalls []uint64 waitCalls []time.Duration closeCalls int seekTailCalls int } func (j *fakeSystemdJournal) AddMatch(match string) error { j.addMatchCalls = append(j.addMatchCalls, match) return nil } func (j *fakeSystemdJournal) Close() error { j.closeCalls++ return nil } func (j *fakeSystemdJournal) GetEntry() (*sdjournal.JournalEntry, error) { if len(j.getEntryValues) == 0 { return nil, j.getEntryError } e := j.getEntryValues[0] j.getEntryValues = j.getEntryValues[1:] return &e, nil } func (j *fakeSystemdJournal) Next() (uint64, error) { if len(j.nextValues) == 0 { return 0, j.nextError } v := j.nextValues[0] j.nextValues = j.nextValues[1:] return v, nil } func (j *fakeSystemdJournal) SeekTail() error { j.seekTailCalls++ 
return nil } func (j *fakeSystemdJournal) PreviousSkip(skip uint64) (uint64, error) { j.previousSkipCalls = append(j.previousSkipCalls, skip) return skip, nil } func (j *fakeSystemdJournal) Wait(timeout time.Duration) int { j.waitCalls = append(j.waitCalls, timeout) if len(j.waitCalls) == 1 { // first wait call return sdjournal.SD_JOURNAL_INVALIDATE } return sdjournal.SD_JOURNAL_APPEND } prometheus-postfix-exporter-0.18.0/main.go000066400000000000000000000170001512764000200206010ustar00rootroot00000000000000package main import ( "context" "fmt" "log/slog" "net/http" "os" "time" _ "embed" "github.com/alecthomas/kingpin/v2" "github.com/hsn723/postfix_exporter/exporter" "github.com/hsn723/postfix_exporter/logsource" "github.com/hsn723/postfix_exporter/showq" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/promslog" "github.com/prometheus/exporter-toolkit/web" "github.com/prometheus/exporter-toolkit/web/kingpinflag" ) var ( version string commit string date string builtBy string //go:embed VERSION fallbackVersion string app *kingpin.Application versionFlag bool toolkitFlags *web.FlagConfig metricsPath string postfixShowqPath string postfixShowqPort int postfixShowqNetwork string logUnsupportedLines bool logLevel string logFormat string logConfig *promslog.Config cleanupLabels []string lmtpLabels []string pipeLabels []string qmgrLabels []string smtpLabels []string smtpdLabels []string bounceLabels []string virtualLabels []string useWatchdog bool ) func getShowqAddress(path, remoteAddr, network string, port int) string { switch network { case "unix": return path case "tcp", "tcp4", "tcp6": return fmt.Sprintf("%s:%d", remoteAddr, port) default: logFatal("Unsupported network type", "network", network) return "" } } func buildVersionString() string { versionString := "postfix_exporter " + version if commit != "" { versionString += " (" + commit + ")" } if date != "" { versionString += " 
built on " + date } if builtBy != "" { versionString += " by: " + builtBy } return versionString } func logFatal(msg string, args ...any) { slog.Error(msg, args...) os.Exit(1) } func initializeExporters(logSrcs []logsource.LogSourceCloser) []*exporter.PostfixExporter { exporters := make([]*exporter.PostfixExporter, 0, len(logSrcs)) for _, logSrc := range logSrcs { showqAddr := getShowqAddress(postfixShowqPath, logSrc.RemoteAddr(), postfixShowqNetwork, postfixShowqPort) s := showq.NewShowq(showqAddr).WithNetwork(postfixShowqNetwork).WithConstLabels(logSrc.ConstLabels()) exporter := exporter.NewPostfixExporter( s, logSrc, logUnsupportedLines, exporter.WithCleanupLabels(cleanupLabels), exporter.WithLmtpLabels(lmtpLabels), exporter.WithPipeLabels(pipeLabels), exporter.WithQmgrLabels(qmgrLabels), exporter.WithSmtpLabels(smtpLabels), exporter.WithSmtpdLabels(smtpdLabels), exporter.WithBounceLabels(bounceLabels), exporter.WithVirtualLabels(virtualLabels), ) prometheus.MustRegister(exporter) exporters = append(exporters, exporter) } return exporters } func runExporter(ctx context.Context) <-chan struct{} { done := make(chan struct{}) logSrcs, err := logsource.NewLogSourceFromFactories(ctx) if err != nil { logFatal("Error opening log source", "error", err.Error()) } exporters := initializeExporters(logSrcs) for _, exporter := range exporters { go exporter.StartMetricCollection(ctx) } go func() { defer close(done) <-ctx.Done() for _, ls := range logSrcs { err := ls.Close() if err != nil { slog.Error("Error closing log source", "error", err.Error()) } } for _, exporter := range exporters { prometheus.Unregister(exporter) } }() return done } func setupMetricsServer(versionString string) error { http.Handle(metricsPath, promhttp.Handler()) lc := web.LandingConfig{ Name: "Postfix Exporter", Description: "Prometheus exporter for postfix metrics", Version: versionString, Links: []web.LandingLinks{ { Address: metricsPath, Text: "Metrics", }, }, } lp, err := web.NewLandingPage(lc) 
if err != nil { return err } http.Handle("/", lp) return nil } func init() { app = kingpin.New("postfix_exporter", "Prometheus metrics exporter for postfix") app.Flag("version", "Print version information").BoolVar(&versionFlag) app.Flag("watchdog", "Use watchdog to monitor log sources.").Default("false").BoolVar(&useWatchdog) toolkitFlags = kingpinflag.AddFlags(app, ":9154") app.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").StringVar(&metricsPath) app.Flag("postfix.showq_path", "Path at which Postfix places its showq socket.").Default("/var/spool/postfix/public/showq").StringVar(&postfixShowqPath) app.Flag("postfix.showq_port", "TCP port at which Postfix's showq service is listening.").Default("10025").IntVar(&postfixShowqPort) app.Flag("postfix.showq_network", "Network type for Postfix's showq service").Default("unix").StringVar(&postfixShowqNetwork) app.Flag("log.unsupported", "Log all unsupported lines.").BoolVar(&logUnsupportedLines) app.Flag("log.level", "Log level: debug, info, warn, error").Default("info").StringVar(&logLevel) app.Flag("log.format", "Log format: logfmt, json").Default("logfmt").StringVar(&logFormat) app.Flag("postfix.cleanup_service_label", "User-defined service labels for the cleanup service.").Default("cleanup").StringsVar(&cleanupLabels) app.Flag("postfix.lmtp_service_label", "User-defined service labels for the lmtp service.").Default("lmtp").StringsVar(&lmtpLabels) app.Flag("postfix.pipe_service_label", "User-defined service labels for the pipe service.").Default("pipe").StringsVar(&pipeLabels) app.Flag("postfix.qmgr_service_label", "User-defined service labels for the qmgr service.").Default("qmgr").StringsVar(&qmgrLabels) app.Flag("postfix.smtp_service_label", "User-defined service labels for the smtp service.").Default("smtp").StringsVar(&smtpLabels) app.Flag("postfix.smtpd_service_label", "User-defined service labels for the smtpd service.").Default("smtpd").StringsVar(&smtpdLabels) 
app.Flag("postfix.bounce_service_label", "User-defined service labels for the bounce service.").Default("bounce").StringsVar(&bounceLabels) app.Flag("postfix.virtual_service_label", "User-defined service labels for the virtual service.").Default("virtual").StringsVar(&virtualLabels) logsource.InitLogSourceFactories(app) kingpin.MustParse(app.Parse(os.Args[1:])) logConfig = &promslog.Config{ Level: promslog.NewLevel(), Format: promslog.NewFormat(), } if err := logConfig.Level.Set(logLevel); err != nil { logFatal("Invalid log level", "level", logLevel, "error", err.Error()) } if err := logConfig.Format.Set(logFormat); err != nil { logFatal("Invalid log format", "format", logFormat, "error", err.Error()) } } func main() { ctx := context.Background() logger := promslog.New(logConfig) slog.SetDefault(logger) if version == "" { version = fallbackVersion } if versionFlag { os.Stdout.WriteString(version) os.Exit(0) } versionString := buildVersionString() slog.Info(versionString) if err := setupMetricsServer(versionString); err != nil { logFatal("Failed to create landing page", "error", err.Error()) } ctx, cancelFunc := context.WithCancel(ctx) defer cancelFunc() done := runExporter(ctx) // Start watchdog if enabled if useWatchdog { go func() { ticker := time.NewTicker(5 * time.Second) watchdogCtx := context.Background() defer ticker.Stop() for range ticker.C { if logsource.IsWatchdogUnhealthy(watchdogCtx) { slog.Warn("Watchdog: log source unhealthy, reloading") cancelFunc() if done != nil { <-done } ctx, cancelFunc = context.WithCancel(context.Background()) done = runExporter(ctx) } } }() } server := &http.Server{} if err := web.ListenAndServe(server, toolkitFlags, logger); err != nil { logFatal("Error starting HTTP server", "error", err.Error()) } } 
prometheus-postfix-exporter-0.18.0/showq/000077500000000000000000000000001512764000200204715ustar00rootroot00000000000000prometheus-postfix-exporter-0.18.0/showq/showq.go000066400000000000000000000114521512764000200221640ustar00rootroot00000000000000package showq import ( "bufio" "bytes" "errors" "fmt" "io" "net" "strconv" "sync" "time" "github.com/prometheus/client_golang/prometheus" ) type Showq struct { mu sync.Mutex // Protects histogram/gauge operations during concurrent scrapes ageHistogram *prometheus.HistogramVec sizeHistogram *prometheus.HistogramVec queueMessageGauge *prometheus.GaugeVec knownQueues map[string]struct{} constLabels prometheus.Labels address string network string once sync.Once } // ScanNullTerminatedEntries is a splitting function for bufio.Scanner // to split entries by null bytes. func ScanNullTerminatedEntries(data []byte, atEOF bool) (advance int, token []byte, err error) { if i := bytes.IndexByte(data, 0); i >= 0 { // Valid record found. return i + 1, data[0:i], nil } else if atEOF && len(data) != 0 { // Data at the end of the file without a null terminator. return 0, nil, errors.New("expected null byte terminator") } else { // Request more data. return 0, nil, nil } } // CollectBinaryShowqFromReader parses Postfix's binary showq format. func (s *Showq) collectBinaryShowqFromReader(file io.Reader, ch chan<- prometheus.Metric) error { err := s.collectBinaryShowqFromScanner(file) s.queueMessageGauge.Collect(ch) s.sizeHistogram.Collect(ch) s.ageHistogram.Collect(ch) return err } func (s *Showq) collectBinaryShowqFromScanner(file io.Reader) error { scanner := bufio.NewScanner(file) scanner.Split(ScanNullTerminatedEntries) queueSizes := make(map[string]float64) // HistogramVec is intended to capture data streams. Showq however always returns all emails // currently queued, therefore we need to reset the histograms before every collect. 
s.sizeHistogram.Reset() s.ageHistogram.Reset() now := float64(time.Now().UnixNano()) / 1e9 queue := "unknown" for scanner.Scan() { // Parse a key/value entry. key := scanner.Text() if len(key) == 0 { // Empty key means a record separator. queue = "unknown" continue } if !scanner.Scan() { return fmt.Errorf("key %q does not have a value", key) } value := scanner.Text() switch key { case "queue_name": // The name of the message queue. queue = value queueSizes[queue]++ case "size": // Message size in bytes. size, err := strconv.ParseFloat(value, 64) if err != nil { return err } s.sizeHistogram.WithLabelValues(queue).Observe(size) case "time": // Message time as a UNIX timestamp. utime, err := strconv.ParseFloat(value, 64) if err != nil { return err } s.ageHistogram.WithLabelValues(queue).Observe(now - utime) } } for q, count := range queueSizes { s.queueMessageGauge.WithLabelValues(q).Set(count) } for q := range s.knownQueues { if _, seen := queueSizes[q]; !seen { s.queueMessageGauge.WithLabelValues(q).Set(0) s.sizeHistogram.WithLabelValues(q) s.ageHistogram.WithLabelValues(q) } } return scanner.Err() } func (s *Showq) init() { s.once.Do(func() { s.ageHistogram = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "postfix", Name: "showq_message_age_seconds", Help: "Age of messages in Postfix's message queue, in seconds", Buckets: []float64{1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8}, ConstLabels: s.constLabels, }, []string{"queue"}) s.sizeHistogram = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Namespace: "postfix", Name: "showq_message_size_bytes", Help: "Size of messages in Postfix's message queue, in bytes", Buckets: []float64{1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9}, ConstLabels: s.constLabels, }, []string{"queue"}) s.queueMessageGauge = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "postfix", Name: "showq_queue_depth", Help: "Number of messages in Postfix's message queue", ConstLabels: s.constLabels, }, []string{"queue"}, ) s.knownQueues = 
map[string]struct{}{"active": {}, "deferred": {}, "hold": {}, "incoming": {}, "maildrop": {}} }) } func (s *Showq) Collect(ch chan<- prometheus.Metric) error { // Lock BEFORE opening socket to serialize all showq queries // This prevents concurrent connections to showq daemon which may not handle them properly s.mu.Lock() defer s.mu.Unlock() fd, err := net.Dial(s.network, s.address) if err != nil { return err } defer fd.Close() s.init() return s.collectBinaryShowqFromReader(fd, ch) } func (s *Showq) Path() string { return fmt.Sprintf("%s://%s", s.network, s.address) } func (s *Showq) WithConstLabels(labels prometheus.Labels) *Showq { s.constLabels = labels return s } func (s *Showq) WithNetwork(network string) *Showq { s.network = network return s } func NewShowq(addr string) *Showq { return &Showq{ address: addr, network: "unix", } } prometheus-postfix-exporter-0.18.0/showq/showq_test.go000066400000000000000000000060651512764000200232270ustar00rootroot00000000000000package showq import ( "bytes" "fmt" "io" "testing" "time" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/stretchr/testify/assert" ) func getSum(t *testing.T, histogram *prometheus.HistogramVec) float64 { t.Helper() var total float64 metrics := make(chan prometheus.Metric, 10) histogram.Collect(metrics) close(metrics) for m := range metrics { dtoMetric := dto.Metric{} if err := m.Write(&dtoMetric); err != nil { t.Fatalf("failed to write metric: %v", err) } if hist := dtoMetric.GetHistogram(); hist != nil { total += hist.GetSampleSum() } } return total } func getCount(t *testing.T, gauge *prometheus.GaugeVec) map[string]float64 { t.Helper() var values = make(map[string]float64) metrics := make(chan prometheus.Metric, 10) gauge.Collect(metrics) close(metrics) for m := range metrics { dtoMetric := dto.Metric{} if err := m.Write(&dtoMetric); err != nil { t.Fatalf("failed to write metric: %v", err) } if gaugeValue := dtoMetric.GetGauge(); gaugeValue 
!= nil { for _, label := range dtoMetric.Label { if label.GetName() == "queue" { values[label.GetValue()] = gaugeValue.GetValue() } } } } return values } func TestCollectBinaryShowqFromReader(t *testing.T) { t.Parallel() tests := []struct { name string data []string wantErr bool expectedTotalCount float64 expectedActiveCount float64 expectedDeferredCount float64 }{ { name: "basic test", data: []string{ "queue_name", "active", "size", "1234", "time", fmt.Sprintf("%d", time.Now().Add(-1*time.Second).Unix()), "", "queue_name", "deferred", "size", "5678", "time", fmt.Sprintf("%d", time.Now().Add(-2*time.Second).Unix()), "", "queue_name", "active", "size", "1000", "time", fmt.Sprintf("%d", time.Now().Add(-3*time.Second).Unix()), "", }, expectedTotalCount: 7912, expectedActiveCount: 2, expectedDeferredCount: 1, }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() var b bytes.Buffer for _, datum := range tt.data { if datum != "" { b.WriteString(datum) } b.WriteByte(0) // Null-terminate each entry } reader := bytes.NewReader(b.Bytes()) s := NewShowq("") s.init() _, err := reader.Seek(0, io.SeekStart) assert.NoError(t, err, "Failed to reset reader position") if err := s.collectBinaryShowqFromScanner(&b); (err != nil) != tt.wantErr { t.Errorf("CollectBinaryShowqFromReader() error = %v, wantErr %v", err, tt.wantErr) } assert.Equal(t, tt.expectedTotalCount, getSum(t, s.sizeHistogram), "Expected a lot more data.") assert.Less(t, 0.0, getSum(t, s.ageHistogram), "Age not greater than 0") counts := getCount(t, s.queueMessageGauge) assert.Equal(t, tt.expectedActiveCount, counts["active"], "Expected active count to match") assert.Equal(t, tt.expectedDeferredCount, counts["deferred"], "Expected hold count to match") }) } }