Skip to content

Commit 6e29865

Browse files
danielnelsonotherpirate
authored and committed
Update sample config
1 parent 594db5b commit 6e29865

File tree

1 file changed

+110
-8
lines changed

1 file changed

+110
-8
lines changed

etc/telegraf.conf

Lines changed: 110 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -399,7 +399,7 @@
399399
# ## 0 : No compression
400400
# ## 1 : Gzip compression
401401
# ## 2 : Snappy compression
402-
# compression_codec = 0
402+
# # compression_codec = 0
403403
#
404404
# ## RequiredAcks is used in Produce Requests to tell the broker how many
405405
# ## replica acknowledgements it must see before responding
@@ -415,10 +415,11 @@
415415
# ## received the data. This option provides the best durability, we
416416
# ## guarantee that no messages will be lost as long as at least one in
417417
# ## sync replica remains.
418-
# required_acks = -1
418+
# # required_acks = -1
419419
#
420-
# ## The total number of times to retry sending a message
421-
# max_retry = 3
420+
# ## The maximum number of times to retry sending a metric before failing
421+
# ## until the next flush.
422+
# # max_retry = 3
422423
#
423424
# ## Optional TLS Config
424425
# # tls_ca = "/etc/telegraf/ca.pem"
@@ -435,7 +436,7 @@
435436
# ## Each data format has its own unique set of configuration options, read
436437
# ## more about them here:
437438
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md
438-
# data_format = "influx"
439+
# # data_format = "influx"
439440

440441

441442
# # Configuration for the AWS Kinesis output.
@@ -776,6 +777,56 @@
776777
# [[processors.printer]]
777778

778779

780+
# # Print all metrics that pass through this filter.
781+
# [[processors.topk]]
782+
# ## How many seconds between aggregations
783+
# # period = 10
784+
#
785+
# ## How many top metrics to return
786+
# # k = 10
787+
#
788+
# ## Over which tags should the aggregation be done. Globs can be specified, in
789+
# ## which case any tag matching the glob will be aggregated over. If set to an
790+
# ## empty list, no aggregation over tags is done
791+
# # group_by = ['*']
792+
#
793+
# ## Over which fields are the top k are calculated
794+
# # fields = ["value"]
795+
#
796+
# ## What aggregation to use. Options: sum, mean, min, max
797+
# # aggregation = "mean"
798+
#
799+
# ## Instead of the top k largest metrics, return the bottom k lowest metrics
800+
# # bottomk = false
801+
#
802+
# ## The plugin assigns each metric a GroupBy tag generated from its name and
803+
# ## tags. If this setting is different than "" the plugin will add a
804+
# ## tag (which name will be the value of this setting) to each metric with
805+
# ## the value of the calculated GroupBy tag. Useful for debugging
806+
# # add_groupby_tag = ""
807+
#
808+
# ## These settings provide a way to know the position of each metric in
809+
# ## the top k. The 'add_rank_fields' setting allows to specify for which
810+
# ## fields the position is required. If the list is non empty, then a field
811+
# ## will be added to each and every metric for each string present in this
812+
# ## setting. This field will contain the ranking of the group that
813+
# ## the metric belonged to when aggregated over that field.
814+
# ## The name of the field will be set to the name of the aggregation field,
815+
# ## suffixed with the string '_topk_rank'
816+
# # add_rank_fields = []
817+
#
818+
# ## These settings provide a way to know what values the plugin is generating
819+
# ## when aggregating metrics. The 'add_aggregate_fields' setting allows to
820+
# ## specify for which fields the final aggregation value is required. If the
821+
# ## list is non empty, then a field will be added to each and every metric for
822+
# ## each field present in this setting. This field will contain
823+
# ## the computed aggregation for the group that the metric belonged to when
824+
# ## aggregated over that field.
825+
# ## The name of the field will be set to the name of the aggregation field,
826+
# ## suffixed with the string '_topk_aggregate'
827+
# # add_aggregate_fields = []
828+
829+
779830

780831
###############################################################################
781832
# AGGREGATOR PLUGINS #
@@ -2093,19 +2144,20 @@
20932144
# ##
20942145

20952146

2096-
# # TCP or UDP 'ping' given url and collect response time in seconds
2147+
# # Collect response time of a TCP or UDP connection
20972148
# [[inputs.net_response]]
20982149
# ## Protocol, must be "tcp" or "udp"
20992150
# ## NOTE: because the "udp" protocol does not respond to requests, it requires
21002151
# ## a send/expect string pair (see below).
21012152
# protocol = "tcp"
21022153
# ## Server address (default localhost)
21032154
# address = "localhost:80"
2155+
#
21042156
# ## Set timeout
2105-
# timeout = "1s"
2157+
# # timeout = "1s"
21062158
#
21072159
# ## Set read timeout (only used if expecting a response)
2108-
# read_timeout = "1s"
2160+
# # read_timeout = "1s"
21092161
#
21102162
# ## The following options are required for UDP checks. For TCP, they are
21112163
# ## optional. The plugin will send the given string to the server and then
@@ -2114,6 +2166,9 @@
21142166
# # send = "ssh"
21152167
# ## expected string in answer
21162168
# # expect = "ssh"
2169+
#
2170+
# ## Uncomment to remove deprecated fields
2171+
# # fieldexclude = ["result_type", "string_found"]
21172172

21182173

21192174
# # Read TCP metrics such as established, time wait and sockets counts.
@@ -2990,6 +3045,53 @@
29903045
# # basic_password = "barfoo"
29913046

29923047

3048+
# # Read JTI OpenConfig Telemetry from listed sensors
3049+
# [[inputs.jti_openconfig_telemetry]]
3050+
# ## List of device addresses to collect telemetry from
3051+
# servers = ["localhost:1883"]
3052+
#
3053+
# ## Authentication details. Username and password are required if the device expects
3054+
# ## authentication. Client ID must be unique when connecting from multiple instances
3055+
# ## of telegraf to the same device
3056+
# username = "user"
3057+
# password = "pass"
3058+
# client_id = "telegraf"
3059+
#
3060+
# ## Frequency to get data
3061+
# sample_frequency = "1000ms"
3062+
#
3063+
# ## Sensors to subscribe for
3064+
# ## An identifier for each sensor can be provided in path by separating with space
3065+
# ## Else sensor path will be used as identifier
3066+
# ## When identifier is used, we can provide a list of space separated sensors.
3067+
# ## A single subscription will be created with all these sensors and data will
3068+
# ## be saved to measurement with this identifier name
3069+
# sensors = [
3070+
# "/interfaces/",
3071+
# "collection /components/ /lldp",
3072+
# ]
3073+
#
3074+
# ## We allow specifying sensor group level reporting rate. To do this, specify the
3075+
# ## reporting rate in Duration at the beginning of sensor paths / collection
3076+
# ## name. For entries without reporting rate, we use configured sample frequency
3077+
# sensors = [
3078+
# "1000ms customReporting /interfaces /lldp",
3079+
# "2000ms collection /components",
3080+
# "/interfaces",
3081+
# ]
3082+
#
3083+
# ## x509 Certificate to use with TLS connection. If it is not provided, an insecure
3084+
# ## channel will be opened with server
3085+
# ssl_cert = "/etc/telegraf/cert.pem"
3086+
#
3087+
# ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms.
3088+
# ## Failed streams/calls will not be retried if 0 is provided
3089+
# retry_delay = "1000ms"
3090+
#
3091+
# ## To treat all string values as tags, set this to true
3092+
# str_as_tags = false
3093+
3094+
29933095
# # Read metrics from Kafka topic(s)
29943096
# [[inputs.kafka_consumer]]
29953097
# ## kafka servers

0 commit comments

Comments
 (0)