diff --git a/.gitignore b/.gitignore index 2442cc07d..b751ad9b7 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,7 @@ dist/ agent.exe agent.exe~ .DS_Store -observe-agent/ +ocb-build/ #integration *.pem diff --git a/builder-config.yaml b/builder-config.yaml index 365cee24e..c25fd18f6 100644 --- a/builder-config.yaml +++ b/builder-config.yaml @@ -20,6 +20,7 @@ processors: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.105.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.105.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.105.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.105.0 receivers: - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.105.0 diff --git a/cmd/collector/components.go b/cmd/collector/components.go index f54faefdc..e42ef2524 100644 --- a/cmd/collector/components.go +++ b/cmd/collector/components.go @@ -24,6 +24,7 @@ import ( k8sattributesprocessor "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor" resourcedetectionprocessor "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" transformprocessor "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" + filterprocessor "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" otlpreceiver "go.opentelemetry.io/collector/receiver/otlpreceiver" awsecscontainermetricsreceiver "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver" dockerstatsreceiver 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dockerstatsreceiver" @@ -126,6 +127,7 @@ func components() (otelcol.Factories, error) { k8sattributesprocessor.NewFactory(), resourcedetectionprocessor.NewFactory(), transformprocessor.NewFactory(), + filterprocessor.NewFactory(), ) if err != nil { return otelcol.Factories{}, err @@ -137,6 +139,7 @@ func components() (otelcol.Factories, error) { factories.ProcessorModules[k8sattributesprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.105.0" factories.ProcessorModules[resourcedetectionprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.105.0" factories.ProcessorModules[transformprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.105.0" + factories.ProcessorModules[filterprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.105.0" factories.Connectors, err = connector.MakeFactoryMap( countconnector.NewFactory(), diff --git a/cmd/collector/go.mod b/cmd/collector/go.mod index 748fafda1..1dd077091 100644 --- a/cmd/collector/go.mod +++ b/cmd/collector/go.mod @@ -12,6 +12,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.105.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage/filestorage v0.105.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.105.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.105.0 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.105.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.105.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.105.0 diff --git a/cmd/collector/go.sum b/cmd/collector/go.sum index 65f8755bb..bd6f6d919 100644 --- a/cmd/collector/go.sum +++ b/cmd/collector/go.sum @@ -648,6 +648,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/winperfcounters v0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/winperfcounters v0.105.0/go.mod h1:i1w6EWM/2TBsopCxkQfE+u96DFgfWoo69pRGPLf7peI= github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.105.0 h1:07WoPlHMy6MGtwToEVaxWODM873QlS9NFzjjc+5fvnA= github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.105.0/go.mod h1:hcCUGzof6T7S400eqMsU9IsW0ht9YjLhrbm2IvKezt8= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.105.0 h1:oRa+acTM4f5rjTT3+hjOVM1LYrlwrm6CSNG4o/RIqcA= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.105.0/go.mod h1:66cZFd4X8vQBTmvm1hPHxrSNHS474iUEsAVbYk9xQBU= github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.105.0 h1:ScIwuYg6l79Ta+deOyZIADXrBlXSdeAZ7sp3MXhm7JY= github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.105.0/go.mod h1:pranRmnWRkzDsn9a16BzSqX6HJ6XjjVVFmMhyZPEzt0= github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.105.0 h1:c/amt4jBLbjIpi4CtRUjQW2gdQbVA607TEX8BCgCwe4= diff --git 
a/cmd/connections/connections.go b/cmd/connections/connections.go index f1a2cd23f..8ea5a0146 100644 --- a/cmd/connections/connections.go +++ b/cmd/connections/connections.go @@ -57,4 +57,5 @@ func (c ConnectionType) GetConfigFilePaths() []string { var AllConnectionTypes = []*ConnectionType{ &HostMonitoringConnectionType, + &SelfMonitoringConnectionType, } diff --git a/cmd/connections/self_monitoring.go b/cmd/connections/self_monitoring.go new file mode 100644 index 000000000..37b0ef07f --- /dev/null +++ b/cmd/connections/self_monitoring.go @@ -0,0 +1,15 @@ +package connections + +type SelfMonitoringConfig struct { + enabled bool +} + +var SelfMonitoringConnectionType = ConnectionType{ + Name: "self_monitoring", + ConfigFields: []CollectorConfigFragment{ + { + configYAMLPath: "enabled", + colConfigFilePath: "logs_and_metrics.yaml", + }, + }, +} diff --git a/go.work.sum b/go.work.sum index 8cf1a740f..c9cc1fbc4 100644 --- a/go.work.sum +++ b/go.work.sum @@ -19,14 +19,36 @@ cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYE dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= git.sr.ht/~sbinet/gg v0.5.0 h1:6V43j30HM623V329xA9Ntq+WJrMjDxRjuAB1LFWF5m8= git.sr.ht/~sbinet/gg v0.5.0/go.mod h1:G2C0eRESqlKhS7ErsNey6HHrqU1PwsnCQlekFi9Q2Oo= +github.com/Azure/azure-amqp-common-go/v4 v4.2.0/go.mod h1:GD3m/WPPma+621UaU6KNjKEo5Hl09z86viKwQjTpV0Q= +github.com/Azure/azure-event-hubs-go/v3 v3.6.2/go.mod h1:n+ocYr9j2JCLYqUqz9eI+lx/TEAtL/g6rZzyTFSuIpc= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= +github.com/Azure/azure-sdk-for-go 
v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0 h1:1nGuui+4POelzDwI7RG56yfQJHCnKvwfMoU7VsEp+Zg= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.12.0/go.mod h1:99EvauvlcJ1U06amZiksfYz/3aFGyIhWGHVyiZXtBAI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 h1:H+U3Gk9zY56G3u872L82bk4thcsy2Gghb9ExT4Zvm1o= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax62XQTd0N4aAqSE5E0DulSpVYK7vc= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.2/go.mod h1:dmXQgZuiSubAecswZE+Sm8jkvEa7kQgTPVRvwL/nd0E= github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= +github.com/Azure/go-amqp v1.0.2/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod 
h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= @@ -84,10 +106,13 @@ github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b h1:WR1qVJzb github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ= github.com/dave/jennifer v1.6.0 h1:MQ/6emI2xM7wt0tJzJzyUik2Q3Tcn2eE0vtYgh4GPVI= github.com/dave/jennifer v1.6.0/go.mod h1:AxTG893FiZKqxy3FP1kL80VMshSMuz2G+EgvszgGRnk= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgraph-io/badger/v4 v4.2.0 
h1:kJrlajbXXL9DFTNuhhu9yCx7JJa4qpYWxtE8BzuWsEs= github.com/dgraph-io/badger/v4 v4.2.0/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -137,6 +162,9 @@ github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= 
github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= @@ -210,6 +238,7 @@ github.com/olivere/elastic v6.2.37+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGe github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0 h1:6dvpPt8pCcV+TfMnnanFk2NQYf9HN1voSS9iIHdW+L8= github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.104.0/go.mod h1:MfSM6mt9qH3vHCaj2rlX6IY/7fN+zCLzNJC25XG9rNU= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/azureblobreceiver v0.105.0/go.mod h1:zmo2uj1VgQAvK3iOus2L7cha1pU3wkmFGn8VGDGCgLE= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.104.0 h1:9HJ3ejNoiMFWxTRy9gobdurEocf79QlxwlYrOY9tMIQ= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.104.0/go.mod h1:Ax4DroNn/xKyjWoJCd3FQE9xOZqHSTdDEj1I3HLNOeQ= github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.104.0 h1:U04Ezl3Keb1j6bcVktvgvAbbMEyPDkM5sNboQgPYI1w= @@ -287,6 +316,7 @@ go.opentelemetry.io/contrib/samplers/jaegerremote v0.22.0 h1:OYxqumWcd1yaV/qvCt1 go.opentelemetry.io/contrib/samplers/jaegerremote v0.22.0/go.mod h1:2tZTRqCbvx7nG57wUwd5NQpNVujOWnR84iPLllIH0Ok= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/image v0.14.0 h1:tNgSxAFe3jC4uYqvZdTr84SZoM1KfwdC9SKIFrLjFn4= golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE= golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= diff --git a/packaging/linux/config/observe-agent.yaml b/packaging/linux/config/observe-agent.yaml index ab634a797..cc70ab327 100644 --- a/packaging/linux/config/observe-agent.yaml +++ b/packaging/linux/config/observe-agent.yaml @@ -7,6 +7,9 @@ observe_url: "${OBSERVE_COLLECTION_ENDPOINT}" # Debug mode - Sets agent log level to debug debug: false +self_monitoring: + enabled: true + host_monitoring: enabled: true logs: diff --git a/packaging/linux/etc/observe-agent/connections/self_monitoring/logs_and_metrics.yaml b/packaging/linux/etc/observe-agent/connections/self_monitoring/logs_and_metrics.yaml new file mode 100644 index 000000000..2d59c8987 --- /dev/null +++ b/packaging/linux/etc/observe-agent/connections/self_monitoring/logs_and_metrics.yaml @@ -0,0 +1,52 @@ +receivers: + filestats/agent: + include: '/etc/observe-agent/otel-collector.yaml' + collection_interval: 240m + initial_delay: 60s + + filelog/agent-config: # TODO: Add observe-agent.yaml once we can obfuscate sensitive config fields + include: [/etc/observe-agent/otel-collector.yaml] + start_at: beginning + poll_interval: 5m + multiline: + line_end_pattern: ENDOFLINEPATTERN + + prometheus/agent: + config: + scrape_configs: + - job_name: 'otelcol' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + metric_relabel_configs: + - source_labels: [__name__] + regex: '.*grpc_io.*' + action: drop + + journald/agent: + units: + - observe-agent + priority: info + +service: + pipelines: + metrics/agent-filestats: + receivers: [filestats/agent] + processors: [resourcedetection, resourcedetection/cloud] + exporters: [otlphttp/observe] + + metrics/agent-internal: + receivers: [prometheus/agent, count] + processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] + exporters: [otlphttp/observe] + + logs/agent-journald: + receivers: [journald/agent] + processors: [memory_limiter, 
transform/truncate, resourcedetection, resourcedetection/cloud, batch] + exporters: [otlphttp/observe, count] + + logs/agent-config: + receivers: [filelog/agent-config] + processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] + exporters: [otlphttp/observe] + \ No newline at end of file diff --git a/packaging/linux/etc/observe-agent/otel-collector.yaml b/packaging/linux/etc/observe-agent/otel-collector.yaml index b3444946c..30d36c842 100644 --- a/packaging/linux/etc/observe-agent/otel-collector.yaml +++ b/packaging/linux/etc/observe-agent/otel-collector.yaml @@ -16,35 +16,6 @@ receivers: http: endpoint: localhost:4318 - filestats/agent: - include: '/etc/observe-agent/otel-collector.yaml' - collection_interval: 240m - initial_delay: 60s - - filelog/agent-config: # TODO: Add observe-agent.yaml once we can obfuscate sensitive config fields - include: [/etc/observe-agent/otel-collector.yaml] - start_at: beginning - poll_interval: 5m - multiline: - line_end_pattern: ENDOFLINEPATTERN - - prometheus/agent: - config: - scrape_configs: - - job_name: 'otelcol' - scrape_interval: 10s - static_configs: - - targets: ['0.0.0.0:8888'] - metric_relabel_configs: - - source_labels: [__name__] - regex: '.*grpc_io.*' - action: drop - - journald/agent: - units: - - observe-agent - priority: info - processors: # Snowflake limit for identifiers: Regardless of whether an identifier is unquoted or double-quoted, the maximum number of characters allowed is 255 (including blank spaces). 
# https://docs.snowflake.com/en/sql-reference/identifiers-syntax#identifier-requirements @@ -99,7 +70,12 @@ processors: detectors: ["gcp", "ecs", "ec2", "azure"] timeout: 2s override: false - + + filter/count: + error_mode: ignore + metrics: + metric: + - 'IsMatch(name, ".*")' exporters: otlphttp/observe: @@ -111,33 +87,15 @@ exporters: queue_size: 100 retry_on_failure: enabled: true + + debug: service: pipelines: - metrics/agent-filestats: - receivers: [filestats/agent] - processors: [resourcedetection, resourcedetection/cloud] - exporters: [otlphttp/observe] - - metrics/agent-internal: - receivers: [prometheus/agent, count] - processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] - exporters: [otlphttp/observe] - metrics/forward: receivers: [otlp] processors: [resourcedetection, resourcedetection/cloud] exporters: [otlphttp/observe] - - logs/agent-journald: - receivers: [journald/agent] - processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] - exporters: [otlphttp/observe, count] - - logs/agent-config: - receivers: [filelog/agent-config] - processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] - exporters: [otlphttp/observe] logs/forward: receivers: [otlp] @@ -149,6 +107,11 @@ service: processors: [resourcedetection, resourcedetection/cloud] exporters: [otlphttp/observe] + metrics/count-noop: + receivers: [count] + processors: [filter/count] + exporters: [debug] + extensions: [health_check, file_storage] telemetry: metrics: diff --git a/packaging/windows/config/otel-collector.yaml b/packaging/windows/config/otel-collector.yaml index 05b858bde..147591b94 100644 --- a/packaging/windows/config/otel-collector.yaml +++ b/packaging/windows/config/otel-collector.yaml @@ -16,31 +16,6 @@ receivers: http: endpoint: localhost:4318 - filestats/agent: - include: 'C:\Program Files\Observe\observe-agent\config\otel-collector.yaml' - 
collection_interval: 240m - initial_delay: 60s - - filelog/agent-config: # TODO: Add observe-agent.yaml once we can obfuscate sensitive config fields - include: ['C:\Program Files\Observe\observe-agent\config\otel-collector.yaml'] - start_at: beginning - poll_interval: 5m - multiline: - line_end_pattern: ENDOFLINEPATTERN - - prometheus/agent: - config: - scrape_configs: - - job_name: 'otelcol' - scrape_interval: 10s - static_configs: - - targets: ['0.0.0.0:8888'] - metric_relabel_configs: - - source_labels: [__name__] - regex: '.*grpc_io.*' - action: drop - - processors: # Snowflake limit for identifiers: Regardless of whether an identifier is unquoted or double-quoted, the maximum number of characters allowed is 255 (including blank spaces). # https://docs.snowflake.com/en/sql-reference/identifiers-syntax#identifier-requirements @@ -76,6 +51,12 @@ processors: timeout: 2s override: false + filter/count: + error_mode: ignore + metrics: + metric: + - 'IsMatch(name, ".*")' + exporters: otlphttp/observe: endpoint: ${env:OBSERVE_ENDPOINT} @@ -87,28 +68,15 @@ exporters: retry_on_failure: enabled: true + debug: + service: pipelines: - metrics/agent-filestats: - receivers: [filestats/agent] - processors: [resourcedetection, resourcedetection/cloud] - exporters: [otlphttp/observe] - - metrics/agent-internal: - receivers: [prometheus/agent, count] - processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] - exporters: [otlphttp/observe] - metrics/forward: receivers: [otlp] processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] exporters: [otlphttp/observe] - logs/agent-config: - receivers: [filelog/agent-config] - processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] - exporters: [otlphttp/observe] - logs/forward: receivers: [otlp] processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] @@ -118,6 
+86,11 @@ service: receivers: [otlp] processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] exporters: [otlphttp/observe] + + metrics/count-noop: + receivers: [count] + processors: [filter/count] + exporters: [debug] extensions: [health_check, file_storage] telemetry: diff --git a/packaging/windows/connections/self_monitoring/logs_and_metrics.yaml b/packaging/windows/connections/self_monitoring/logs_and_metrics.yaml new file mode 100644 index 000000000..d76fa36c5 --- /dev/null +++ b/packaging/windows/connections/self_monitoring/logs_and_metrics.yaml @@ -0,0 +1,41 @@ +receivers: + filestats/agent: + include: 'C:\Program Files\Observe\observe-agent\config\otel-collector.yaml' + collection_interval: 240m + initial_delay: 60s + + filelog/agent-config: # TODO: Add observe-agent.yaml once we can obfuscate sensitive config fields + include: ['C:\Program Files\Observe\observe-agent\config\otel-collector.yaml'] + start_at: beginning + poll_interval: 5m + multiline: + line_end_pattern: ENDOFLINEPATTERN + + prometheus/agent: + config: + scrape_configs: + - job_name: 'otelcol' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + metric_relabel_configs: + - source_labels: [__name__] + regex: '.*grpc_io.*' + action: drop + +service: + pipelines: + metrics/agent-filestats: + receivers: [filestats/agent] + processors: [resourcedetection, resourcedetection/cloud] + exporters: [otlphttp/observe] + + metrics/agent-internal: + receivers: [prometheus/agent, count] + processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] + exporters: [otlphttp/observe] + + logs/agent-config: + receivers: [filelog/agent-config] + processors: [memory_limiter, transform/truncate, resourcedetection, resourcedetection/cloud, batch] + exporters: [otlphttp/observe] diff --git a/packaging/windows/observe-agent.yaml b/packaging/windows/observe-agent.yaml index ab634a797..cc70ab327 100644 --- 
a/packaging/windows/observe-agent.yaml +++ b/packaging/windows/observe-agent.yaml @@ -7,6 +7,9 @@ observe_url: "${OBSERVE_COLLECTION_ENDPOINT}" # Debug mode - Sets agent log level to debug debug: false +self_monitoring: + enabled: true + host_monitoring: enabled: true logs: diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/Makefile new file mode 100644 index 000000000..ded7a3609 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/Makefile @@ -0,0 +1 @@ +include ../../Makefile.Common diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md new file mode 100644 index 000000000..6448b3c79 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md @@ -0,0 +1,228 @@ +# Filter Processor + +| Status | | +| ------------- |-----------| +| Stability | [alpha]: traces, metrics, logs | +| Distributions | [core], [contrib] | +| Warnings | [Orphaned Telemetry, Other](#warnings) | +| Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Ffilter%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Ffilter) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Ffilter%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Ffilter) | +| [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@TylerHelmuth](https://www.github.com/TylerHelmuth), [@boostchicken](https://www.github.com/boostchicken) | + +[alpha]: https://github.com/open-telemetry/opentelemetry-collector#alpha +[core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol +[contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib + + +The filterprocessor allows dropping spans, span events, metrics, datapoints, and logs from the collector. + +## Configuration + +The filterprocessor utilizes the [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/README.md) +to create conditions that determine when telemetry should be dropped. +If **any** condition is met, the telemetry is dropped (each condition is ORed together). +Each configuration option corresponds with a different type of telemetry and OTTL Context. +See the table below for details on each context and the fields it exposes. 
+ +| Config | OTTL Context | +|---------------------|------------------------------------------------------------------------------------------------------------------------------------| +| `traces.span` | [Span](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspan/README.md) | +| `traces.spanevent` | [SpanEvent](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspanevent/README.md) | +| `metrics.metric` | [Metric](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlmetric/README.md) | +| `metrics.datapoint` | [DataPoint](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottldatapoint/README.md) | +| `logs.log_record` | [Log](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottllog/README.md) | + +The OTTL allows the use of `and`, `or`, and `()` in conditions. +See [OTTL Boolean Expressions](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/README.md#boolean-expressions) for more details. + +For conditions that apply to the same signal, such as spans and span events, if the "higher" level telemetry matches a condition and is dropped, the "lower" level condition will not be checked. +This means that if a span is dropped but a span event condition was defined, the span event condition will not be checked for that span. +The same relationship applies to metrics and datapoints. + +If all span events for a span are dropped, the span will be left intact. +If all datapoints for a metric are dropped, the metric will also be dropped. 
+ +The filter processor also allows configuring an optional field, `error_mode`, which will determine how the processor reacts to errors that occur while processing an OTTL condition. + +| error_mode | description | +|------------|----------------------------------------------------------------------------------------------------------------------------------------| +| ignore | The processor ignores errors returned by conditions, logs them, and continues on to the next condition. This is the recommended mode. | +| silent | The processor ignores errors returned by conditions, does not log them, and continues on to the next condition. | +| propagate | The processor returns the error up the pipeline. This will result in the payload being dropped from the collector. | + +If not specified, `propagate` will be used. + +### Examples + +```yaml +processors: + filter/ottl: + error_mode: ignore + traces: + span: + - 'attributes["container.name"] == "app_container_1"' + - 'resource.attributes["host.name"] == "localhost"' + - 'name == "app_3"' + spanevent: + - 'attributes["grpc"] == true' + - 'IsMatch(name, ".*grpc.*")' + metrics: + metric: + - 'name == "my.metric" and resource.attributes["my_label"] == "abc123"' + - 'type == METRIC_DATA_TYPE_HISTOGRAM' + datapoint: + - 'metric.type == METRIC_DATA_TYPE_SUMMARY' + - 'resource.attributes["service.name"] == "my_service_name"' + logs: + log_record: + - 'IsMatch(body, ".*password.*")' + - 'severity_number < SEVERITY_NUMBER_WARN' +``` + +#### Dropping data based on a resource attribute +```yaml +processors: + filter: + error_mode: ignore + traces: + span: + - IsMatch(resource.attributes["k8s.pod.name"], "my-pod-name.*") +``` + +#### Dropping metrics with invalid type +```yaml +processors: + filter: + error_mode: ignore + metrics: + metric: + - type == METRIC_DATA_TYPE_NONE +``` + +#### Dropping specific metric and value +```yaml +processors: + filter: + error_mode: ignore + metrics: + datapoint: + - metric.name == "k8s.pod.phase" 
and value_int == 4 +``` + +#### Dropping non-HTTP spans +```yaml +processors: + filter: + error_mode: ignore + traces: + span: + - attributes["http.request.method"] == nil +``` + +#### Dropping HTTP spans +```yaml +processors: + filter: + error_mode: ignore + traces: + span: + - attributes["http.request.method"] != nil +``` + +### OTTL Functions + +The filter processor has access to all [OTTL Converter functions](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#converters) + +In addition, the processor defines a few of its own functions: + +**Metrics only functions** +- [HasAttrKeyOnDatapoint](#HasAttrKeyOnDatapoint) +- [HasAttrOnDatapoint](#HasAttrOnDatapoint) + +#### HasAttrKeyOnDatapoint + +`HasAttrKeyOnDatapoint(key)` + +Returns `true` if the given key appears in the attribute map of any datapoint on a metric. +`key` must be a string. You must use the `metrics.metric` context. + +Examples: + +- `HasAttrKeyOnDatapoint("http.method")` + +```yaml +# Drops metrics containing the 'bad.metric' attribute key +filter/keep_good_metrics: + error_mode: ignore + metrics: + metric: + - 'HasAttrKeyOnDatapoint("bad.metric")' +``` + +#### HasAttrOnDatapoint + +`HasAttrOnDatapoint(key, value)` + +Returns `true` if the given key and value appears in the attribute map of any datapoint on a metric. +`key` and `value` must both be strings. If the value of the attribute on the datapoint is not a string, `value` will be compared to `""`. You must use the `metrics.metric` context. 
+ +Examples: + +- `HasAttrOnDatapoint("http.method", "GET")` + +```yaml +# Drops metrics containing the 'bad.metric' attribute key and 'true' value +filter/keep_good_metrics: + error_mode: ignore + metrics: + metric: + - 'HasAttrOnDatapoint("bad.metric", "true")' +``` + +## Troubleshooting + +When using OTTL you can enable debug logging in the collector to print out useful information, +such as if the condition matched and the TransformContext used in the condition, to help you troubleshoot +why a condition is not behaving as you expect. This feature is very verbose, but provides you an accurate +view into how OTTL views the underlying data. + +```yaml +receivers: + filelog: + start_at: beginning + include: [ /Users/tylerhelmuth/projects/opentelemetry-collector-contrib/local/test.log ] + + +processors: + filter: + error_mode: ignore + logs: + log_record: + - body == "test" + +exporters: + debug: + +service: + telemetry: + logs: + level: debug + pipelines: + logs: + receivers: + - filelog + processors: + - filter + exporters: + - debug +``` + +``` +2024-05-29T16:47:04.362-0600 debug ottl@v0.101.0/parser.go:338 condition evaluation result {"kind": "processor", "name": "filter", "pipeline": "logs", "condition": "body == \"test\"", "match": true, "TransformContext": {"resource": {"attributes": {}, "dropped_attribute_count": 0}, "scope": {"attributes": {}, "dropped_attribute_count": 0, "name": "", "version": ""}, "log_record": {"attributes": {"log.file.name": "test.log"}, "body": "test", "dropped_attribute_count": 0, "flags": 0, "observed_time_unix_nano": 1717022824262063000, "severity_number": 0, "severity_text": "", "span_id": "", "time_unix_nano": 0, "trace_id": ""}, "cache": {}}} +``` + +## Warnings + +In general, understand your data before using the filter processor. + +- When using the filterprocessor make sure you understand the look of your incoming data and test the configuration thoroughly. 
In general, use as specific a configuration as possible to lower the risk of the wrong data being dropped. +- [Orphaned Telemetry](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/standard-warnings.md#orphaned-telemetry): The processor allows dropping spans. Dropping a span may lead to orphaned spans if the dropped span is a parent. Dropping a span may lead to orphaned logs if the log references the dropped span. diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go new file mode 100644 index 000000000..3f517382f --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go @@ -0,0 +1,315 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" + +import ( + "errors" + "fmt" + "strings" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/pdata/plog" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" +) + +// Config defines configuration for Resource processor. 
+type Config struct { + // ErrorMode determines how the processor reacts to errors that occur while processing an OTTL condition. + // Valid values are `ignore` and `propagate`. + // `ignore` means the processor ignores errors returned by conditions and continues on to the next condition. This is the recommended mode. + // `propagate` means the processor returns the error up the pipeline. This will result in the payload being dropped from the collector. + // The default value is `propagate`. + ErrorMode ottl.ErrorMode `mapstructure:"error_mode"` + + Metrics MetricFilters `mapstructure:"metrics"` + + Logs LogFilters `mapstructure:"logs"` + + Spans filterconfig.MatchConfig `mapstructure:"spans"` + + Traces TraceFilters `mapstructure:"traces"` +} + +// MetricFilters filters by Metric properties. +type MetricFilters struct { + // Include match properties describe metrics that should be included in the Collector Service pipeline, + // all other metrics should be dropped from further processing. + // If both Include and Exclude are specified, Include filtering occurs first. + Include *filterconfig.MetricMatchProperties `mapstructure:"include"` + + // Exclude match properties describe metrics that should be excluded from the Collector Service pipeline, + // all other metrics should be included. + // If both Include and Exclude are specified, Include filtering occurs first. + Exclude *filterconfig.MetricMatchProperties `mapstructure:"exclude"` + + // RegexpConfig specifies options for the regexp match type + RegexpConfig *regexp.Config `mapstructure:"regexp"` + + // MetricConditions is a list of OTTL conditions for an ottlmetric context. + // If any condition resolves to true, the metric will be dropped. + // Supports `and`, `or`, and `()` + MetricConditions []string `mapstructure:"metric"` + + // DataPointConditions is a list of OTTL conditions for an ottldatapoint context. + // If any condition resolves to true, the datapoint will be dropped. 
+ // Supports `and`, `or`, and `()` + DataPointConditions []string `mapstructure:"datapoint"` +} + +// TraceFilters filters by OTTL conditions +type TraceFilters struct { + // SpanConditions is a list of OTTL conditions for an ottlspan context. + // If any condition resolves to true, the span will be dropped. + // Supports `and`, `or`, and `()` + SpanConditions []string `mapstructure:"span"` + + // SpanEventConditions is a list of OTTL conditions for an ottlspanevent context. + // If any condition resolves to true, the span event will be dropped. + // Supports `and`, `or`, and `()` + SpanEventConditions []string `mapstructure:"spanevent"` +} + +// LogFilters filters by Log properties. +type LogFilters struct { + // Include match properties describe logs that should be included in the Collector Service pipeline, + // all other logs should be dropped from further processing. + // If both Include and Exclude are specified, Include filtering occurs first. + Include *LogMatchProperties `mapstructure:"include"` + // Exclude match properties describe logs that should be excluded from the Collector Service pipeline, + // all other logs should be included. + // If both Include and Exclude are specified, Include filtering occurs first. + Exclude *LogMatchProperties `mapstructure:"exclude"` + + // LogConditions is a list of OTTL conditions for an ottllog context. + // If any condition resolves to true, the log event will be dropped. + // Supports `and`, `or`, and `()` + LogConditions []string `mapstructure:"log_record"` +} + +// LogMatchType specifies the strategy for matching against `plog.Log`s. +type LogMatchType string + +// These are the MatchTypes that users can specify for filtering +// `plog.Log`s. 
+const ( + strictType = LogMatchType(filterset.Strict) + regexpType = LogMatchType(filterset.Regexp) +) + +var severityToNumber = map[string]plog.SeverityNumber{ + "1": plog.SeverityNumberTrace, + "2": plog.SeverityNumberTrace2, + "3": plog.SeverityNumberTrace3, + "4": plog.SeverityNumberTrace4, + "5": plog.SeverityNumberDebug, + "6": plog.SeverityNumberDebug2, + "7": plog.SeverityNumberDebug3, + "8": plog.SeverityNumberDebug4, + "9": plog.SeverityNumberInfo, + "10": plog.SeverityNumberInfo2, + "11": plog.SeverityNumberInfo3, + "12": plog.SeverityNumberInfo4, + "13": plog.SeverityNumberWarn, + "14": plog.SeverityNumberWarn2, + "15": plog.SeverityNumberWarn3, + "16": plog.SeverityNumberWarn4, + "17": plog.SeverityNumberError, + "18": plog.SeverityNumberError2, + "19": plog.SeverityNumberError3, + "20": plog.SeverityNumberError4, + "21": plog.SeverityNumberFatal, + "22": plog.SeverityNumberFatal2, + "23": plog.SeverityNumberFatal3, + "24": plog.SeverityNumberFatal4, + "TRACE": plog.SeverityNumberTrace, + "TRACE2": plog.SeverityNumberTrace2, + "TRACE3": plog.SeverityNumberTrace3, + "TRACE4": plog.SeverityNumberTrace4, + "DEBUG": plog.SeverityNumberDebug, + "DEBUG2": plog.SeverityNumberDebug2, + "DEBUG3": plog.SeverityNumberDebug3, + "DEBUG4": plog.SeverityNumberDebug4, + "INFO": plog.SeverityNumberInfo, + "INFO2": plog.SeverityNumberInfo2, + "INFO3": plog.SeverityNumberInfo3, + "INFO4": plog.SeverityNumberInfo4, + "WARN": plog.SeverityNumberWarn, + "WARN2": plog.SeverityNumberWarn2, + "WARN3": plog.SeverityNumberWarn3, + "WARN4": plog.SeverityNumberWarn4, + "ERROR": plog.SeverityNumberError, + "ERROR2": plog.SeverityNumberError2, + "ERROR3": plog.SeverityNumberError3, + "ERROR4": plog.SeverityNumberError4, + "FATAL": plog.SeverityNumberFatal, + "FATAL2": plog.SeverityNumberFatal2, + "FATAL3": plog.SeverityNumberFatal3, + "FATAL4": plog.SeverityNumberFatal4, +} + +var errInvalidSeverity = errors.New("not a valid severity") + +// logSeverity is a type that represents a 
SeverityNumber as a string +type logSeverity string + +// validate checks that the logSeverity is valid +func (l logSeverity) validate() error { + if l == "" { + // No severity specified, which means to ignore this field. + return nil + } + + capsSeverity := strings.ToUpper(string(l)) + if _, ok := severityToNumber[capsSeverity]; !ok { + return fmt.Errorf("'%s' is not a valid severity: %w", string(l), errInvalidSeverity) + } + return nil +} + +// severityNumber returns the severity number that the logSeverity represents +func (l logSeverity) severityNumber() plog.SeverityNumber { + capsSeverity := strings.ToUpper(string(l)) + return severityToNumber[capsSeverity] +} + +// LogMatchProperties specifies the set of properties in a log to match against and the +// type of string pattern matching to use. +type LogMatchProperties struct { + // LogMatchType specifies the type of matching desired + LogMatchType LogMatchType `mapstructure:"match_type"` + + // ResourceAttributes defines a list of possible resource attributes to match logs against. + // A match occurs if any resource attribute matches all expressions in this given list. + ResourceAttributes []filterconfig.Attribute `mapstructure:"resource_attributes"` + + // RecordAttributes defines a list of possible record attributes to match logs against. + // A match occurs if any record attribute matches at least one expression in this given list. + RecordAttributes []filterconfig.Attribute `mapstructure:"record_attributes"` + + // SeverityTexts is a list of strings that the LogRecord's severity text field must match + // against. + SeverityTexts []string `mapstructure:"severity_texts"` + + // SeverityNumberProperties defines how to match against a log record's SeverityNumber, if defined. + SeverityNumberProperties *LogSeverityNumberMatchProperties `mapstructure:"severity_number"` + + // LogBodies is a list of strings that the LogRecord's body field must match + // against. 
+ LogBodies []string `mapstructure:"bodies"` +} + +// validate checks that the LogMatchProperties is valid +func (lmp LogMatchProperties) validate() error { + if lmp.SeverityNumberProperties != nil { + return lmp.SeverityNumberProperties.validate() + } + return nil +} + +// isEmpty returns true if the properties is "empty" (meaning, there are no filters specified) +// if this is the case, the filter should be ignored. +func (lmp LogMatchProperties) isEmpty() bool { + return len(lmp.ResourceAttributes) == 0 && len(lmp.RecordAttributes) == 0 && + len(lmp.SeverityTexts) == 0 && len(lmp.LogBodies) == 0 && + lmp.SeverityNumberProperties == nil +} + +// matchProperties converts the LogMatchProperties to a corresponding filterconfig.MatchProperties +func (lmp LogMatchProperties) matchProperties() *filterconfig.MatchProperties { + mp := &filterconfig.MatchProperties{ + Config: filterset.Config{ + MatchType: filterset.MatchType(lmp.LogMatchType), + }, + Resources: lmp.ResourceAttributes, + Attributes: lmp.RecordAttributes, + LogSeverityTexts: lmp.SeverityTexts, + LogBodies: lmp.LogBodies, + } + + // Include SeverityNumberProperties if defined + if lmp.SeverityNumberProperties != nil { + mp.LogSeverityNumber = &filterconfig.LogSeverityNumberMatchProperties{ + Min: lmp.SeverityNumberProperties.Min.severityNumber(), + MatchUndefined: lmp.SeverityNumberProperties.MatchUndefined, + } + } + + return mp +} + +type LogSeverityNumberMatchProperties struct { + // Min is the minimum severity needed for the log record to match. + // This corresponds to the short names specified here: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity + // this field is case-insensitive ("INFO" == "info") + Min logSeverity `mapstructure:"min"` + + // MatchUndefined lets logs records with "unknown" severity match. 
+ // If MinSeverity is not set, this field is ignored, as fields are not matched based on severity. + MatchUndefined bool `mapstructure:"match_undefined"` +} + +// validate checks that the LogMatchProperties is valid +func (lmp LogSeverityNumberMatchProperties) validate() error { + return lmp.Min.validate() +} + +var _ component.Config = (*Config)(nil) + +// Validate checks if the processor configuration is valid +func (cfg *Config) Validate() error { + if (cfg.Traces.SpanConditions != nil || cfg.Traces.SpanEventConditions != nil) && (cfg.Spans.Include != nil || cfg.Spans.Exclude != nil) { + return fmt.Errorf("cannot use ottl conditions and include/exclude for spans at the same time") + } + if (cfg.Metrics.MetricConditions != nil || cfg.Metrics.DataPointConditions != nil) && (cfg.Metrics.Include != nil || cfg.Metrics.Exclude != nil) { + return fmt.Errorf("cannot use ottl conditions and include/exclude for metrics at the same time") + } + if cfg.Logs.LogConditions != nil && (cfg.Logs.Include != nil || cfg.Logs.Exclude != nil) { + return fmt.Errorf("cannot use ottl conditions and include/exclude for logs at the same time") + } + + var errors error + + if cfg.Traces.SpanConditions != nil { + _, err := filterottl.NewBoolExprForSpan(cfg.Traces.SpanConditions, filterottl.StandardSpanFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) + errors = multierr.Append(errors, err) + } + + if cfg.Traces.SpanEventConditions != nil { + _, err := filterottl.NewBoolExprForSpanEvent(cfg.Traces.SpanEventConditions, filterottl.StandardSpanEventFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) + errors = multierr.Append(errors, err) + } + + if cfg.Metrics.MetricConditions != nil { + _, err := filterottl.NewBoolExprForMetric(cfg.Metrics.MetricConditions, filterottl.StandardMetricFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) + errors = multierr.Append(errors, err) + } + + if 
cfg.Metrics.DataPointConditions != nil { + _, err := filterottl.NewBoolExprForDataPoint(cfg.Metrics.DataPointConditions, filterottl.StandardDataPointFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) + errors = multierr.Append(errors, err) + } + + if cfg.Logs.LogConditions != nil { + _, err := filterottl.NewBoolExprForLog(cfg.Logs.LogConditions, filterottl.StandardLogFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) + errors = multierr.Append(errors, err) + } + + if cfg.Logs.LogConditions != nil && cfg.Logs.Include != nil { + errors = multierr.Append(errors, cfg.Logs.Include.validate()) + } + + if cfg.Logs.LogConditions != nil && cfg.Logs.Exclude != nil { + errors = multierr.Append(errors, cfg.Logs.Exclude.validate()) + } + + return errors +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/doc.go new file mode 100644 index 000000000..c47a233ff --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:generate mdatagen metadata.yaml + +// Package filterprocessor implements a processor for filtering +// (dropping) metrics and/or spans by various properties. 
+package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md new file mode 100644 index 000000000..b9bb78c5b --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/documentation.md @@ -0,0 +1,31 @@ +[comment]: <> (Code generated by mdatagen. DO NOT EDIT.) + +# filter + +## Internal Telemetry + +The following telemetry is emitted by this component. + +### processor_filter_datapoints.filtered + +Number of metric data points dropped by the filter processor + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### processor_filter_logs.filtered + +Number of logs dropped by the filter processor + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | + +### processor_filter_spans.filtered + +Number of spans dropped by the filter processor + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| 1 | Sum | Int | true | diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go new file mode 100644 index 000000000..b92067f9a --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package 
filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" + +import ( + "context" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processorhelper" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata" +) + +var processorCapabilities = consumer.Capabilities{MutatesData: true} + +// NewFactory returns a new factory for the Filter processor. +func NewFactory() processor.Factory { + return processor.NewFactory( + metadata.Type, + createDefaultConfig, + processor.WithMetrics(createMetricsProcessor, metadata.MetricsStability), + processor.WithLogs(createLogsProcessor, metadata.LogsStability), + processor.WithTraces(createTracesProcessor, metadata.TracesStability), + ) +} + +func createDefaultConfig() component.Config { + return &Config{ + ErrorMode: ottl.PropagateError, + } +} + +func createMetricsProcessor( + ctx context.Context, + set processor.Settings, + cfg component.Config, + nextConsumer consumer.Metrics, +) (processor.Metrics, error) { + fp, err := newFilterMetricProcessor(set, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewMetricsProcessor( + ctx, + set, + cfg, + nextConsumer, + fp.processMetrics, + processorhelper.WithCapabilities(processorCapabilities)) +} + +func createLogsProcessor( + ctx context.Context, + set processor.Settings, + cfg component.Config, + nextConsumer consumer.Logs, +) (processor.Logs, error) { + fp, err := newFilterLogsProcessor(set, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewLogsProcessor( + ctx, + set, + cfg, + nextConsumer, + fp.processLogs, + processorhelper.WithCapabilities(processorCapabilities)) +} 
+ +func createTracesProcessor( + ctx context.Context, + set processor.Settings, + cfg component.Config, + nextConsumer consumer.Traces, +) (processor.Traces, error) { + fp, err := newFilterSpansProcessor(set, cfg.(*Config)) + if err != nil { + return nil, err + } + return processorhelper.NewTracesProcessor( + ctx, + set, + cfg, + nextConsumer, + fp.processTraces, + processorhelper.WithCapabilities(processorCapabilities)) +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go new file mode 100644 index 000000000..d1d914c86 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_status.go @@ -0,0 +1,17 @@ +// Code generated by mdatagen. DO NOT EDIT. + +package metadata + +import ( + "go.opentelemetry.io/collector/component" +) + +var ( + Type = component.MustNewType("filter") +) + +const ( + TracesStability = component.StabilityLevelAlpha + MetricsStability = component.StabilityLevelAlpha + LogsStability = component.StabilityLevelAlpha +) diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go new file mode 100644 index 000000000..8567ac334 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata/generated_telemetry.go @@ -0,0 +1,76 @@ +// Code generated by mdatagen. DO NOT EDIT. 
+ +package metadata + +import ( + "errors" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" + "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/config/configtelemetry" +) + +func Meter(settings component.TelemetrySettings) metric.Meter { + return settings.MeterProvider.Meter("otelcol/filter") +} + +func Tracer(settings component.TelemetrySettings) trace.Tracer { + return settings.TracerProvider.Tracer("otelcol/filter") +} + +// TelemetryBuilder provides an interface for components to report telemetry +// as defined in metadata and user config. +type TelemetryBuilder struct { + meter metric.Meter + ProcessorFilterDatapointsFiltered metric.Int64Counter + ProcessorFilterLogsFiltered metric.Int64Counter + ProcessorFilterSpansFiltered metric.Int64Counter + level configtelemetry.Level +} + +// telemetryBuilderOption applies changes to default builder. +type telemetryBuilderOption func(*TelemetryBuilder) + +// WithLevel sets the current telemetry level for the component. 
+func WithLevel(lvl configtelemetry.Level) telemetryBuilderOption { + return func(builder *TelemetryBuilder) { + builder.level = lvl + } +} + +// NewTelemetryBuilder provides a struct with methods to update all internal telemetry +// for a component +func NewTelemetryBuilder(settings component.TelemetrySettings, options ...telemetryBuilderOption) (*TelemetryBuilder, error) { + builder := TelemetryBuilder{level: configtelemetry.LevelBasic} + for _, op := range options { + op(&builder) + } + var err, errs error + if builder.level >= configtelemetry.LevelBasic { + builder.meter = Meter(settings) + } else { + builder.meter = noop.Meter{} + } + builder.ProcessorFilterDatapointsFiltered, err = builder.meter.Int64Counter( + "processor_filter_datapoints.filtered", + metric.WithDescription("Number of metric data points dropped by the filter processor"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ProcessorFilterLogsFiltered, err = builder.meter.Int64Counter( + "processor_filter_logs.filtered", + metric.WithDescription("Number of logs dropped by the filter processor"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + builder.ProcessorFilterSpansFiltered, err = builder.meter.Int64Counter( + "processor_filter_spans.filtered", + metric.WithDescription("Number of spans dropped by the filter processor"), + metric.WithUnit("1"), + ) + errs = errors.Join(errs, err) + return &builder, errs +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go new file mode 100644 index 000000000..d3e3ed2bb --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package 
filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processorhelper" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" +) + +type filterLogProcessor struct { + skipExpr expr.BoolExpr[ottllog.TransformContext] + telemetry *filterProcessorTelemetry + logger *zap.Logger +} + +func newFilterLogsProcessor(set processor.Settings, cfg *Config) (*filterLogProcessor, error) { + flp := &filterLogProcessor{ + logger: set.Logger, + } + + fpt, err := newfilterProcessorTelemetry(set) + if err != nil { + return nil, fmt.Errorf("error creating filter processor telemetry: %w", err) + } + flp.telemetry = fpt + + if cfg.Logs.LogConditions != nil { + skipExpr, errBoolExpr := filterottl.NewBoolExprForLog(cfg.Logs.LogConditions, filterottl.StandardLogFuncs(), cfg.ErrorMode, set.TelemetrySettings) + if errBoolExpr != nil { + return nil, errBoolExpr + } + flp.skipExpr = skipExpr + return flp, nil + } + + cfgMatch := filterconfig.MatchConfig{} + if cfg.Logs.Include != nil && !cfg.Logs.Include.isEmpty() { + cfgMatch.Include = cfg.Logs.Include.matchProperties() + } + + if cfg.Logs.Exclude != nil && !cfg.Logs.Exclude.isEmpty() { + cfgMatch.Exclude = cfg.Logs.Exclude.matchProperties() + } + + skipExpr, err := 
filterlog.NewSkipExpr(&cfgMatch) + if err != nil { + return nil, fmt.Errorf("failed to build skip matcher: %w", err) + } + flp.skipExpr = skipExpr + + return flp, nil +} + +func (flp *filterLogProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) { + if flp.skipExpr == nil { + return ld, nil + } + + logCountBeforeFilters := ld.LogRecordCount() + + var errors error + ld.ResourceLogs().RemoveIf(func(rl plog.ResourceLogs) bool { + resource := rl.Resource() + rl.ScopeLogs().RemoveIf(func(sl plog.ScopeLogs) bool { + scope := sl.Scope() + lrs := sl.LogRecords() + lrs.RemoveIf(func(lr plog.LogRecord) bool { + skip, err := flp.skipExpr.Eval(ctx, ottllog.NewTransformContext(lr, scope, resource, sl, rl)) + if err != nil { + errors = multierr.Append(errors, err) + return false + } + return skip + }) + + return sl.LogRecords().Len() == 0 + }) + return rl.ScopeLogs().Len() == 0 + }) + + logCountAfterFilters := ld.LogRecordCount() + flp.telemetry.record(triggerLogsDropped, int64(logCountBeforeFilters-logCountAfterFilters)) + + if errors != nil { + flp.logger.Error("failed processing logs", zap.Error(errors)) + return ld, errors + } + if ld.ResourceLogs().Len() == 0 { + return ld, processorhelper.ErrSkipProcessingData + } + return ld, nil +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml new file mode 100644 index 000000000..5474feaa4 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metadata.yaml @@ -0,0 +1,37 @@ +type: filter +scope_name: otelcol/filter + +status: + class: processor + stability: + alpha: [traces, metrics, logs] + distributions: [core, contrib] + warnings: [Orphaned Telemetry, Other] + codeowners: + active: [TylerHelmuth, boostchicken] +tests: + 
config: + +telemetry: + metrics: + processor_filter_datapoints.filtered: + enabled: true + description: Number of metric data points dropped by the filter processor + unit: "1" + sum: + value_type: int + monotonic: true + processor_filter_logs.filtered: + enabled: true + description: Number of logs dropped by the filter processor + unit: "1" + sum: + value_type: int + monotonic: true + processor_filter_spans.filtered: + enabled: true + description: Number of spans dropped by the filter processor + unit: "1" + sum: + value_type: int + monotonic: true diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go new file mode 100644 index 000000000..ac81eaecf --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go @@ -0,0 +1,309 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processorhelper" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric" + 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" +) + +type filterMetricProcessor struct { + skipResourceExpr expr.BoolExpr[ottlresource.TransformContext] + skipMetricExpr expr.BoolExpr[ottlmetric.TransformContext] + skipDataPointExpr expr.BoolExpr[ottldatapoint.TransformContext] + telemetry *filterProcessorTelemetry + logger *zap.Logger +} + +func newFilterMetricProcessor(set processor.Settings, cfg *Config) (*filterMetricProcessor, error) { + var err error + fsp := &filterMetricProcessor{ + logger: set.Logger, + } + + fpt, err := newfilterProcessorTelemetry(set) + if err != nil { + return nil, fmt.Errorf("error creating filter processor telemetry: %w", err) + } + fsp.telemetry = fpt + + if cfg.Metrics.MetricConditions != nil || cfg.Metrics.DataPointConditions != nil { + if cfg.Metrics.MetricConditions != nil { + fsp.skipMetricExpr, err = filterottl.NewBoolExprForMetric(cfg.Metrics.MetricConditions, filterottl.StandardMetricFuncs(), cfg.ErrorMode, set.TelemetrySettings) + if err != nil { + return nil, err + } + } + + if cfg.Metrics.DataPointConditions != nil { + fsp.skipDataPointExpr, err = filterottl.NewBoolExprForDataPoint(cfg.Metrics.DataPointConditions, filterottl.StandardDataPointFuncs(), cfg.ErrorMode, set.TelemetrySettings) + if err != nil { + return nil, err + } + } + + return fsp, nil + } + + fsp.skipResourceExpr, err = newSkipResExpr(cfg.Metrics.Include, cfg.Metrics.Exclude) + if err != nil { + return nil, err + } + + fsp.skipMetricExpr, err = 
filtermetric.NewSkipExpr(cfg.Metrics.Include, cfg.Metrics.Exclude) + if err != nil { + return nil, err + } + + includeMatchType := "" + var includeExpressions []string + var includeMetricNames []string + var includeResourceAttributes []filterconfig.Attribute + if cfg.Metrics.Include != nil { + includeMatchType = string(cfg.Metrics.Include.MatchType) + includeExpressions = cfg.Metrics.Include.Expressions + includeMetricNames = cfg.Metrics.Include.MetricNames + includeResourceAttributes = cfg.Metrics.Include.ResourceAttributes + } + + excludeMatchType := "" + var excludeExpressions []string + var excludeMetricNames []string + var excludeResourceAttributes []filterconfig.Attribute + if cfg.Metrics.Exclude != nil { + excludeMatchType = string(cfg.Metrics.Exclude.MatchType) + excludeExpressions = cfg.Metrics.Exclude.Expressions + excludeMetricNames = cfg.Metrics.Exclude.MetricNames + excludeResourceAttributes = cfg.Metrics.Exclude.ResourceAttributes + } + + set.Logger.Info( + "Metric filter configured", + zap.String("include match_type", includeMatchType), + zap.Strings("include expressions", includeExpressions), + zap.Strings("include metric names", includeMetricNames), + zap.Any("include metrics with resource attributes", includeResourceAttributes), + zap.String("exclude match_type", excludeMatchType), + zap.Strings("exclude expressions", excludeExpressions), + zap.Strings("exclude metric names", excludeMetricNames), + zap.Any("exclude metrics with resource attributes", excludeResourceAttributes), + ) + + return fsp, nil +} + +// processMetrics filters the given metrics based off the filterMetricProcessor's filters. 
+func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { + if fmp.skipResourceExpr == nil && fmp.skipMetricExpr == nil && fmp.skipDataPointExpr == nil { + return md, nil + } + + metricDataPointCountBeforeFilters := md.DataPointCount() + + var errors error + md.ResourceMetrics().RemoveIf(func(rmetrics pmetric.ResourceMetrics) bool { + resource := rmetrics.Resource() + if fmp.skipResourceExpr != nil { + skip, err := fmp.skipResourceExpr.Eval(ctx, ottlresource.NewTransformContext(resource, rmetrics)) + if err != nil { + errors = multierr.Append(errors, err) + return false + } + if skip { + return true + } + } + rmetrics.ScopeMetrics().RemoveIf(func(smetrics pmetric.ScopeMetrics) bool { + scope := smetrics.Scope() + smetrics.Metrics().RemoveIf(func(metric pmetric.Metric) bool { + if fmp.skipMetricExpr != nil { + skip, err := fmp.skipMetricExpr.Eval(ctx, ottlmetric.NewTransformContext(metric, smetrics.Metrics(), scope, resource, smetrics, rmetrics)) + if err != nil { + errors = multierr.Append(errors, err) + } + if skip { + return true + } + } + if fmp.skipDataPointExpr != nil { + //exhaustive:enforce + switch metric.Type() { + case pmetric.MetricTypeSum: + errors = multierr.Append(errors, fmp.handleNumberDataPoints(ctx, metric.Sum().DataPoints(), metric, smetrics.Metrics(), scope, resource)) + return metric.Sum().DataPoints().Len() == 0 + case pmetric.MetricTypeGauge: + errors = multierr.Append(errors, fmp.handleNumberDataPoints(ctx, metric.Gauge().DataPoints(), metric, smetrics.Metrics(), scope, resource)) + return metric.Gauge().DataPoints().Len() == 0 + case pmetric.MetricTypeHistogram: + errors = multierr.Append(errors, fmp.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metric, smetrics.Metrics(), scope, resource)) + return metric.Histogram().DataPoints().Len() == 0 + case pmetric.MetricTypeExponentialHistogram: + errors = multierr.Append(errors, 
fmp.handleExponetialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metric, smetrics.Metrics(), scope, resource)) + return metric.ExponentialHistogram().DataPoints().Len() == 0 + case pmetric.MetricTypeSummary: + errors = multierr.Append(errors, fmp.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metric, smetrics.Metrics(), scope, resource)) + return metric.Summary().DataPoints().Len() == 0 + default: + return false + } + } + return false + }) + return smetrics.Metrics().Len() == 0 + }) + return rmetrics.ScopeMetrics().Len() == 0 + }) + + metricDataPointCountAfterFilters := md.DataPointCount() + fmp.telemetry.record(triggerMetricDataPointsDropped, int64(metricDataPointCountBeforeFilters-metricDataPointCountAfterFilters)) + + if errors != nil { + fmp.logger.Error("failed processing metrics", zap.Error(errors)) + return md, errors + } + if md.ResourceMetrics().Len() == 0 { + return md, processorhelper.ErrSkipProcessingData + } + return md, nil +} + +func newSkipResExpr(include *filterconfig.MetricMatchProperties, exclude *filterconfig.MetricMatchProperties) (expr.BoolExpr[ottlresource.TransformContext], error) { + if filtermetric.UseOTTLBridge.IsEnabled() { + mp := filterconfig.MatchConfig{} + + if include != nil { + mp.Include = &filterconfig.MatchProperties{ + Config: filterset.Config{ + MatchType: filterset.MatchType(include.MatchType), + RegexpConfig: include.RegexpConfig, + }, + Resources: include.ResourceAttributes, + } + } + + if exclude != nil { + mp.Exclude = &filterconfig.MatchProperties{ + Config: filterset.Config{ + MatchType: filterset.MatchType(exclude.MatchType), + RegexpConfig: exclude.RegexpConfig, + }, + Resources: exclude.ResourceAttributes, + } + } + + return filterottl.NewResourceSkipExprBridge(&mp) + } + + var matchers []expr.BoolExpr[ottlresource.TransformContext] + inclExpr, err := newResExpr(include) + if err != nil { + return nil, err + } + if inclExpr != nil { + matchers = append(matchers, 
expr.Not(inclExpr)) + } + exclExpr, err := newResExpr(exclude) + if err != nil { + return nil, err + } + if exclExpr != nil { + matchers = append(matchers, exclExpr) + } + return expr.Or(matchers...), nil +} + +type resExpr filtermatcher.AttributesMatcher + +func (r resExpr) Eval(_ context.Context, tCtx ottlresource.TransformContext) (bool, error) { + return filtermatcher.AttributesMatcher(r).Match(tCtx.GetResource().Attributes()), nil +} + +func newResExpr(mp *filterconfig.MetricMatchProperties) (expr.BoolExpr[ottlresource.TransformContext], error) { + if mp == nil { + return nil, nil + } + attributeMatcher, err := filtermatcher.NewAttributesMatcher( + filterset.Config{ + MatchType: filterset.MatchType(mp.MatchType), + RegexpConfig: mp.RegexpConfig, + }, + mp.ResourceAttributes, + ) + if err != nil { + return nil, err + } + if attributeMatcher == nil { + return nil, err + } + return resExpr(attributeMatcher), nil +} + +func (fmp *filterMetricProcessor) handleNumberDataPoints(ctx context.Context, dps pmetric.NumberDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { + var errors error + dps.RemoveIf(func(datapoint pmetric.NumberDataPoint) bool { + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) + if err != nil { + errors = multierr.Append(errors, err) + return false + } + return skip + }) + return errors +} + +func (fmp *filterMetricProcessor) handleHistogramDataPoints(ctx context.Context, dps pmetric.HistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { + var errors error + dps.RemoveIf(func(datapoint pmetric.HistogramDataPoint) bool { + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, 
pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) + if err != nil { + errors = multierr.Append(errors, err) + return false + } + return skip + }) + return errors +} + +func (fmp *filterMetricProcessor) handleExponetialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { + var errors error + dps.RemoveIf(func(datapoint pmetric.ExponentialHistogramDataPoint) bool { + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) + if err != nil { + errors = multierr.Append(errors, err) + return false + } + return skip + }) + return errors +} + +func (fmp *filterMetricProcessor) handleSummaryDataPoints(ctx context.Context, dps pmetric.SummaryDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error { + var errors error + dps.RemoveIf(func(datapoint pmetric.SummaryDataPoint) bool { + skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics())) + if err != nil { + errors = multierr.Append(errors, err) + return false + } + return skip + }) + return errors +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go new file mode 100644 index 000000000..30ad4ee36 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/telemetry.go @@ -0,0 +1,54 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + 
+package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" + +import ( + "context" + + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata" +) + +type trigger int + +const ( + triggerMetricDataPointsDropped trigger = iota + triggerLogsDropped + triggerSpansDropped +) + +type filterProcessorTelemetry struct { + exportCtx context.Context + + processorAttr []attribute.KeyValue + + telemetryBuilder *metadata.TelemetryBuilder +} + +func newfilterProcessorTelemetry(set processor.Settings) (*filterProcessorTelemetry, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(set.TelemetrySettings) + if err != nil { + return nil, err + } + + return &filterProcessorTelemetry{ + processorAttr: []attribute.KeyValue{attribute.String(metadata.Type.String(), set.ID.String())}, + exportCtx: context.Background(), + telemetryBuilder: telemetryBuilder, + }, nil +} + +func (fpt *filterProcessorTelemetry) record(trigger trigger, dropped int64) { + switch trigger { + case triggerMetricDataPointsDropped: + fpt.telemetryBuilder.ProcessorFilterDatapointsFiltered.Add(fpt.exportCtx, dropped, metric.WithAttributes(fpt.processorAttr...)) + case triggerLogsDropped: + fpt.telemetryBuilder.ProcessorFilterLogsFiltered.Add(fpt.exportCtx, dropped, metric.WithAttributes(fpt.processorAttr...)) + case triggerSpansDropped: + fpt.telemetryBuilder.ProcessorFilterSpansFiltered.Add(fpt.exportCtx, dropped, metric.WithAttributes(fpt.processorAttr...)) + } +} diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go new file mode 100644 
index 000000000..4ca860e63 --- /dev/null +++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go @@ -0,0 +1,133 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/collector/pdata/ptrace" + "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processorhelper" + "go.uber.org/multierr" + "go.uber.org/zap" + + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent" +) + +type filterSpanProcessor struct { + skipSpanExpr expr.BoolExpr[ottlspan.TransformContext] + skipSpanEventExpr expr.BoolExpr[ottlspanevent.TransformContext] + telemetry *filterProcessorTelemetry + logger *zap.Logger +} + +func newFilterSpansProcessor(set processor.Settings, cfg *Config) (*filterSpanProcessor, error) { + var err error + fsp := &filterSpanProcessor{ + logger: set.Logger, + } + + fpt, err := newfilterProcessorTelemetry(set) + if err != nil { + return nil, fmt.Errorf("error creating filter processor telemetry: %w", err) + } + fsp.telemetry = fpt + + if cfg.Traces.SpanConditions != nil || cfg.Traces.SpanEventConditions != nil { + if cfg.Traces.SpanConditions != nil { + fsp.skipSpanExpr, err = filterottl.NewBoolExprForSpan(cfg.Traces.SpanConditions, filterottl.StandardSpanFuncs(), 
cfg.ErrorMode, set.TelemetrySettings) + if err != nil { + return nil, err + } + } + if cfg.Traces.SpanEventConditions != nil { + fsp.skipSpanEventExpr, err = filterottl.NewBoolExprForSpanEvent(cfg.Traces.SpanEventConditions, filterottl.StandardSpanEventFuncs(), cfg.ErrorMode, set.TelemetrySettings) + if err != nil { + return nil, err + } + } + return fsp, nil + } + + fsp.skipSpanExpr, err = filterspan.NewSkipExpr(&cfg.Spans) + if err != nil { + return nil, err + } + + includeMatchType, excludeMatchType := "[None]", "[None]" + if cfg.Spans.Include != nil { + includeMatchType = string(cfg.Spans.Include.MatchType) + } + + if cfg.Spans.Exclude != nil { + excludeMatchType = string(cfg.Spans.Exclude.MatchType) + } + + set.Logger.Info( + "Span filter configured", + zap.String("[Include] match_type", includeMatchType), + zap.String("[Exclude] match_type", excludeMatchType), + ) + + return fsp, nil +} + +// processTraces filters the given spans of a traces based off the filterSpanProcessor's filters. 
+func (fsp *filterSpanProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { + if fsp.skipSpanExpr == nil && fsp.skipSpanEventExpr == nil { + return td, nil + } + + spanCountBeforeFilters := td.SpanCount() + + var errors error + td.ResourceSpans().RemoveIf(func(rs ptrace.ResourceSpans) bool { + resource := rs.Resource() + rs.ScopeSpans().RemoveIf(func(ss ptrace.ScopeSpans) bool { + scope := ss.Scope() + ss.Spans().RemoveIf(func(span ptrace.Span) bool { + if fsp.skipSpanExpr != nil { + skip, err := fsp.skipSpanExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource, ss, rs)) + if err != nil { + errors = multierr.Append(errors, err) + return false + } + if skip { + return true + } + } + if fsp.skipSpanEventExpr != nil { + span.Events().RemoveIf(func(spanEvent ptrace.SpanEvent) bool { + skip, err := fsp.skipSpanEventExpr.Eval(ctx, ottlspanevent.NewTransformContext(spanEvent, span, scope, resource, ss, rs)) + if err != nil { + errors = multierr.Append(errors, err) + return false + } + return skip + }) + } + return false + }) + return ss.Spans().Len() == 0 + }) + return rs.ScopeSpans().Len() == 0 + }) + + spanCountAfterFilters := td.SpanCount() + fsp.telemetry.record(triggerSpansDropped, int64(spanCountBeforeFilters-spanCountAfterFilters)) + + if errors != nil { + fsp.logger.Error("failed processing traces", zap.Error(errors)) + return td, errors + } + if td.ResourceSpans().Len() == 0 { + return td, processorhelper.ErrSkipProcessingData + } + return td, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 78ce2ff7a..67d2e1959 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -890,6 +890,10 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/winperfcounters/in ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor/internal/metadata +# github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.105.0 +## explicit; go 1.21.0 +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/metadata # github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.105.0 ## explicit; go 1.21.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor